commitctx: simplify locking
Matt Mackall
r8398:a45eb410 default
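The change drops the working-directory lock from commitctx(): as the method's own docstring notes, commitctx() does not touch the working directory, so the store lock alone suffices and can be released directly rather than through the release() helper. A condensed before/after sketch of just the affected lines (taken from the hunk that follows):

    # before: take both the wlock and the store lock
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        return self._commitctx(ctx, force=True, force_editor=False,
                               empty_ok=True, use_dirstate=False,
                               update_dirstate=False)
    finally:
        release(lock, wlock)

    # after: the store lock alone is sufficient
    lock = self.lock()
    try:
        return self._commitctx(ctx, force=True, force_editor=False,
                               empty_ok=True, use_dirstate=False,
                               update_dirstate=False)
    finally:
        lock.release()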
@@ -1,2159 +1,2157 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset'))
21 capabilities = set(('lookup', 'changegroupsubset'))
22 supported = set('revlogv1 store fncache'.split())
22 supported = set('revlogv1 store fncache'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31
31
32 if not os.path.isdir(self.path):
32 if not os.path.isdir(self.path):
33 if create:
33 if create:
34 if not os.path.exists(path):
34 if not os.path.exists(path):
35 os.mkdir(path)
35 os.mkdir(path)
36 os.mkdir(self.path)
36 os.mkdir(self.path)
37 requirements = ["revlogv1"]
37 requirements = ["revlogv1"]
38 if baseui.configbool('format', 'usestore', True):
38 if baseui.configbool('format', 'usestore', True):
39 os.mkdir(os.path.join(self.path, "store"))
39 os.mkdir(os.path.join(self.path, "store"))
40 requirements.append("store")
40 requirements.append("store")
41 if baseui.configbool('format', 'usefncache', True):
41 if baseui.configbool('format', 'usefncache', True):
42 requirements.append("fncache")
42 requirements.append("fncache")
43 # create an invalid changelog
43 # create an invalid changelog
44 self.opener("00changelog.i", "a").write(
44 self.opener("00changelog.i", "a").write(
45 '\0\0\0\2' # represents revlogv2
45 '\0\0\0\2' # represents revlogv2
46 ' dummy changelog to prevent using the old repo layout'
46 ' dummy changelog to prevent using the old repo layout'
47 )
47 )
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 else:
52 else:
53 raise error.RepoError(_("repository %s not found") % path)
53 raise error.RepoError(_("repository %s not found") % path)
54 elif create:
54 elif create:
55 raise error.RepoError(_("repository %s already exists") % path)
55 raise error.RepoError(_("repository %s already exists") % path)
56 else:
56 else:
57 # find requirements
57 # find requirements
58 requirements = set()
58 requirements = set()
59 try:
59 try:
60 requirements = set(self.opener("requires").read().splitlines())
60 requirements = set(self.opener("requires").read().splitlines())
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64 for r in requirements - self.supported:
64 for r in requirements - self.supported:
65 raise error.RepoError(_("requirement '%s' not supported") % r)
65 raise error.RepoError(_("requirement '%s' not supported") % r)
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.baseui = baseui
73 self.baseui = baseui
74 self.ui = baseui.copy()
74 self.ui = baseui.copy()
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
84 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
85 self._branchcachetip = None
86 self.nodetagscache = None
86 self.nodetagscache = None
87 self.filterpats = {}
87 self.filterpats = {}
88 self._datafilters = {}
88 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
89 self._transref = self._lockref = self._wlockref = None
90
90
91 @propertycache
91 @propertycache
92 def changelog(self):
92 def changelog(self):
93 c = changelog.changelog(self.sopener)
93 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
94 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
95 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
96 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
97 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
98 self.sopener.defversion = c.version
99 return c
99 return c
100
100
101 @propertycache
101 @propertycache
102 def manifest(self):
102 def manifest(self):
103 return manifest.manifest(self.sopener)
103 return manifest.manifest(self.sopener)
104
104
105 @propertycache
105 @propertycache
106 def dirstate(self):
106 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
107 return dirstate.dirstate(self.opener, self.ui, self.root)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid == None:
110 if changeid == None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
127 def hook(self, name, throw=False, **args):
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
128 return hook.hook(self.ui, self, name, throw, **args)
129
129
130 tag_disallowed = ':\r\n'
130 tag_disallowed = ':\r\n'
131
131
132 def _tag(self, names, node, message, local, user, date, parent=None,
132 def _tag(self, names, node, message, local, user, date, parent=None,
133 extra={}):
133 extra={}):
134 use_dirstate = parent is None
134 use_dirstate = parent is None
135
135
136 if isinstance(names, str):
136 if isinstance(names, str):
137 allchars = names
137 allchars = names
138 names = (names,)
138 names = (names,)
139 else:
139 else:
140 allchars = ''.join(names)
140 allchars = ''.join(names)
141 for c in self.tag_disallowed:
141 for c in self.tag_disallowed:
142 if c in allchars:
142 if c in allchars:
143 raise util.Abort(_('%r cannot be used in a tag name') % c)
143 raise util.Abort(_('%r cannot be used in a tag name') % c)
144
144
145 for name in names:
145 for name in names:
146 self.hook('pretag', throw=True, node=hex(node), tag=name,
146 self.hook('pretag', throw=True, node=hex(node), tag=name,
147 local=local)
147 local=local)
148
148
149 def writetags(fp, names, munge, prevtags):
149 def writetags(fp, names, munge, prevtags):
150 fp.seek(0, 2)
150 fp.seek(0, 2)
151 if prevtags and prevtags[-1] != '\n':
151 if prevtags and prevtags[-1] != '\n':
152 fp.write('\n')
152 fp.write('\n')
153 for name in names:
153 for name in names:
154 m = munge and munge(name) or name
154 m = munge and munge(name) or name
155 if self._tagstypecache and name in self._tagstypecache:
155 if self._tagstypecache and name in self._tagstypecache:
156 old = self.tagscache.get(name, nullid)
156 old = self.tagscache.get(name, nullid)
157 fp.write('%s %s\n' % (hex(old), m))
157 fp.write('%s %s\n' % (hex(old), m))
158 fp.write('%s %s\n' % (hex(node), m))
158 fp.write('%s %s\n' % (hex(node), m))
159 fp.close()
159 fp.close()
160
160
161 prevtags = ''
161 prevtags = ''
162 if local:
162 if local:
163 try:
163 try:
164 fp = self.opener('localtags', 'r+')
164 fp = self.opener('localtags', 'r+')
165 except IOError:
165 except IOError:
166 fp = self.opener('localtags', 'a')
166 fp = self.opener('localtags', 'a')
167 else:
167 else:
168 prevtags = fp.read()
168 prevtags = fp.read()
169
169
170 # local tags are stored in the current charset
170 # local tags are stored in the current charset
171 writetags(fp, names, None, prevtags)
171 writetags(fp, names, None, prevtags)
172 for name in names:
172 for name in names:
173 self.hook('tag', node=hex(node), tag=name, local=local)
173 self.hook('tag', node=hex(node), tag=name, local=local)
174 return
174 return
175
175
176 if use_dirstate:
176 if use_dirstate:
177 try:
177 try:
178 fp = self.wfile('.hgtags', 'rb+')
178 fp = self.wfile('.hgtags', 'rb+')
179 except IOError:
179 except IOError:
180 fp = self.wfile('.hgtags', 'ab')
180 fp = self.wfile('.hgtags', 'ab')
181 else:
181 else:
182 prevtags = fp.read()
182 prevtags = fp.read()
183 else:
183 else:
184 try:
184 try:
185 prevtags = self.filectx('.hgtags', parent).data()
185 prevtags = self.filectx('.hgtags', parent).data()
186 except error.LookupError:
186 except error.LookupError:
187 pass
187 pass
188 fp = self.wfile('.hgtags', 'wb')
188 fp = self.wfile('.hgtags', 'wb')
189 if prevtags:
189 if prevtags:
190 fp.write(prevtags)
190 fp.write(prevtags)
191
191
192 # committed tags are stored in UTF-8
192 # committed tags are stored in UTF-8
193 writetags(fp, names, encoding.fromlocal, prevtags)
193 writetags(fp, names, encoding.fromlocal, prevtags)
194
194
195 if use_dirstate and '.hgtags' not in self.dirstate:
195 if use_dirstate and '.hgtags' not in self.dirstate:
196 self.add(['.hgtags'])
196 self.add(['.hgtags'])
197
197
198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
199 extra=extra)
199 extra=extra)
200
200
201 for name in names:
201 for name in names:
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203
203
204 return tagnode
204 return tagnode
205
205
206 def tag(self, names, node, message, local, user, date):
206 def tag(self, names, node, message, local, user, date):
207 '''tag a revision with one or more symbolic names.
207 '''tag a revision with one or more symbolic names.
208
208
209 names is a list of strings or, when adding a single tag, names may be a
209 names is a list of strings or, when adding a single tag, names may be a
210 string.
210 string.
211
211
212 if local is True, the tags are stored in a per-repository file.
212 if local is True, the tags are stored in a per-repository file.
213 otherwise, they are stored in the .hgtags file, and a new
213 otherwise, they are stored in the .hgtags file, and a new
214 changeset is committed with the change.
214 changeset is committed with the change.
215
215
216 keyword arguments:
216 keyword arguments:
217
217
218 local: whether to store tags in non-version-controlled file
218 local: whether to store tags in non-version-controlled file
219 (default False)
219 (default False)
220
220
221 message: commit message to use if committing
221 message: commit message to use if committing
222
222
223 user: name of user to use if committing
223 user: name of user to use if committing
224
224
225 date: date tuple to use if committing'''
225 date: date tuple to use if committing'''
226
226
227 for x in self.status()[:5]:
227 for x in self.status()[:5]:
228 if '.hgtags' in x:
228 if '.hgtags' in x:
229 raise util.Abort(_('working copy of .hgtags is changed '
229 raise util.Abort(_('working copy of .hgtags is changed '
230 '(please commit .hgtags manually)'))
230 '(please commit .hgtags manually)'))
231
231
232 self.tags() # instantiate the cache
232 self.tags() # instantiate the cache
233 self._tag(names, node, message, local, user, date)
233 self._tag(names, node, message, local, user, date)
234
234
235 def tags(self):
235 def tags(self):
236 '''return a mapping of tag to node'''
236 '''return a mapping of tag to node'''
237 if self.tagscache:
237 if self.tagscache:
238 return self.tagscache
238 return self.tagscache
239
239
240 globaltags = {}
240 globaltags = {}
241 tagtypes = {}
241 tagtypes = {}
242
242
243 def readtags(lines, fn, tagtype):
243 def readtags(lines, fn, tagtype):
244 filetags = {}
244 filetags = {}
245 count = 0
245 count = 0
246
246
247 def warn(msg):
247 def warn(msg):
248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
249
249
250 for l in lines:
250 for l in lines:
251 count += 1
251 count += 1
252 if not l:
252 if not l:
253 continue
253 continue
254 s = l.split(" ", 1)
254 s = l.split(" ", 1)
255 if len(s) != 2:
255 if len(s) != 2:
256 warn(_("cannot parse entry"))
256 warn(_("cannot parse entry"))
257 continue
257 continue
258 node, key = s
258 node, key = s
259 key = encoding.tolocal(key.strip()) # stored in UTF-8
259 key = encoding.tolocal(key.strip()) # stored in UTF-8
260 try:
260 try:
261 bin_n = bin(node)
261 bin_n = bin(node)
262 except TypeError:
262 except TypeError:
263 warn(_("node '%s' is not well formed") % node)
263 warn(_("node '%s' is not well formed") % node)
264 continue
264 continue
265 if bin_n not in self.changelog.nodemap:
265 if bin_n not in self.changelog.nodemap:
266 warn(_("tag '%s' refers to unknown node") % key)
266 warn(_("tag '%s' refers to unknown node") % key)
267 continue
267 continue
268
268
269 h = []
269 h = []
270 if key in filetags:
270 if key in filetags:
271 n, h = filetags[key]
271 n, h = filetags[key]
272 h.append(n)
272 h.append(n)
273 filetags[key] = (bin_n, h)
273 filetags[key] = (bin_n, h)
274
274
275 for k, nh in filetags.iteritems():
275 for k, nh in filetags.iteritems():
276 if k not in globaltags:
276 if k not in globaltags:
277 globaltags[k] = nh
277 globaltags[k] = nh
278 tagtypes[k] = tagtype
278 tagtypes[k] = tagtype
279 continue
279 continue
280
280
281 # we prefer the global tag if:
281 # we prefer the global tag if:
282 # it supercedes us OR
282 # it supercedes us OR
283 # mutual supercedes and it has a higher rank
283 # mutual supercedes and it has a higher rank
284 # otherwise we win because we're tip-most
284 # otherwise we win because we're tip-most
285 an, ah = nh
285 an, ah = nh
286 bn, bh = globaltags[k]
286 bn, bh = globaltags[k]
287 if (bn != an and an in bh and
287 if (bn != an and an in bh and
288 (bn not in ah or len(bh) > len(ah))):
288 (bn not in ah or len(bh) > len(ah))):
289 an = bn
289 an = bn
290 ah.extend([n for n in bh if n not in ah])
290 ah.extend([n for n in bh if n not in ah])
291 globaltags[k] = an, ah
291 globaltags[k] = an, ah
292 tagtypes[k] = tagtype
292 tagtypes[k] = tagtype
293
293
294 # read the tags file from each head, ending with the tip
294 # read the tags file from each head, ending with the tip
295 f = None
295 f = None
296 for rev, node, fnode in self._hgtagsnodes():
296 for rev, node, fnode in self._hgtagsnodes():
297 f = (f and f.filectx(fnode) or
297 f = (f and f.filectx(fnode) or
298 self.filectx('.hgtags', fileid=fnode))
298 self.filectx('.hgtags', fileid=fnode))
299 readtags(f.data().splitlines(), f, "global")
299 readtags(f.data().splitlines(), f, "global")
300
300
301 try:
301 try:
302 data = encoding.fromlocal(self.opener("localtags").read())
302 data = encoding.fromlocal(self.opener("localtags").read())
303 # localtags are stored in the local character set
303 # localtags are stored in the local character set
304 # while the internal tag table is stored in UTF-8
304 # while the internal tag table is stored in UTF-8
305 readtags(data.splitlines(), "localtags", "local")
305 readtags(data.splitlines(), "localtags", "local")
306 except IOError:
306 except IOError:
307 pass
307 pass
308
308
309 self.tagscache = {}
309 self.tagscache = {}
310 self._tagstypecache = {}
310 self._tagstypecache = {}
311 for k, nh in globaltags.iteritems():
311 for k, nh in globaltags.iteritems():
312 n = nh[0]
312 n = nh[0]
313 if n != nullid:
313 if n != nullid:
314 self.tagscache[k] = n
314 self.tagscache[k] = n
315 self._tagstypecache[k] = tagtypes[k]
315 self._tagstypecache[k] = tagtypes[k]
316 self.tagscache['tip'] = self.changelog.tip()
316 self.tagscache['tip'] = self.changelog.tip()
317 return self.tagscache
317 return self.tagscache
318
318
319 def tagtype(self, tagname):
319 def tagtype(self, tagname):
320 '''
320 '''
321 return the type of the given tag. result can be:
321 return the type of the given tag. result can be:
322
322
323 'local' : a local tag
323 'local' : a local tag
324 'global' : a global tag
324 'global' : a global tag
325 None : tag does not exist
325 None : tag does not exist
326 '''
326 '''
327
327
328 self.tags()
328 self.tags()
329
329
330 return self._tagstypecache.get(tagname)
330 return self._tagstypecache.get(tagname)
331
331
332 def _hgtagsnodes(self):
332 def _hgtagsnodes(self):
333 last = {}
333 last = {}
334 ret = []
334 ret = []
335 for node in reversed(self.heads()):
335 for node in reversed(self.heads()):
336 c = self[node]
336 c = self[node]
337 rev = c.rev()
337 rev = c.rev()
338 try:
338 try:
339 fnode = c.filenode('.hgtags')
339 fnode = c.filenode('.hgtags')
340 except error.LookupError:
340 except error.LookupError:
341 continue
341 continue
342 ret.append((rev, node, fnode))
342 ret.append((rev, node, fnode))
343 if fnode in last:
343 if fnode in last:
344 ret[last[fnode]] = None
344 ret[last[fnode]] = None
345 last[fnode] = len(ret) - 1
345 last[fnode] = len(ret) - 1
346 return [item for item in ret if item]
346 return [item for item in ret if item]
347
347
348 def tagslist(self):
348 def tagslist(self):
349 '''return a list of tags ordered by revision'''
349 '''return a list of tags ordered by revision'''
350 l = []
350 l = []
351 for t, n in self.tags().iteritems():
351 for t, n in self.tags().iteritems():
352 try:
352 try:
353 r = self.changelog.rev(n)
353 r = self.changelog.rev(n)
354 except:
354 except:
355 r = -2 # sort to the beginning of the list if unknown
355 r = -2 # sort to the beginning of the list if unknown
356 l.append((r, t, n))
356 l.append((r, t, n))
357 return [(t, n) for r, t, n in sorted(l)]
357 return [(t, n) for r, t, n in sorted(l)]
358
358
359 def nodetags(self, node):
359 def nodetags(self, node):
360 '''return the tags associated with a node'''
360 '''return the tags associated with a node'''
361 if not self.nodetagscache:
361 if not self.nodetagscache:
362 self.nodetagscache = {}
362 self.nodetagscache = {}
363 for t, n in self.tags().iteritems():
363 for t, n in self.tags().iteritems():
364 self.nodetagscache.setdefault(n, []).append(t)
364 self.nodetagscache.setdefault(n, []).append(t)
365 return self.nodetagscache.get(node, [])
365 return self.nodetagscache.get(node, [])
366
366
367 def _branchtags(self, partial, lrev):
367 def _branchtags(self, partial, lrev):
368 # TODO: rename this function?
368 # TODO: rename this function?
369 tiprev = len(self) - 1
369 tiprev = len(self) - 1
370 if lrev != tiprev:
370 if lrev != tiprev:
371 self._updatebranchcache(partial, lrev+1, tiprev+1)
371 self._updatebranchcache(partial, lrev+1, tiprev+1)
372 self._writebranchcache(partial, self.changelog.tip(), tiprev)
372 self._writebranchcache(partial, self.changelog.tip(), tiprev)
373
373
374 return partial
374 return partial
375
375
376 def _branchheads(self):
376 def _branchheads(self):
377 tip = self.changelog.tip()
377 tip = self.changelog.tip()
378 if self.branchcache is not None and self._branchcachetip == tip:
378 if self.branchcache is not None and self._branchcachetip == tip:
379 return self.branchcache
379 return self.branchcache
380
380
381 oldtip = self._branchcachetip
381 oldtip = self._branchcachetip
382 self._branchcachetip = tip
382 self._branchcachetip = tip
383 if self.branchcache is None:
383 if self.branchcache is None:
384 self.branchcache = {} # avoid recursion in changectx
384 self.branchcache = {} # avoid recursion in changectx
385 else:
385 else:
386 self.branchcache.clear() # keep using the same dict
386 self.branchcache.clear() # keep using the same dict
387 if oldtip is None or oldtip not in self.changelog.nodemap:
387 if oldtip is None or oldtip not in self.changelog.nodemap:
388 partial, last, lrev = self._readbranchcache()
388 partial, last, lrev = self._readbranchcache()
389 else:
389 else:
390 lrev = self.changelog.rev(oldtip)
390 lrev = self.changelog.rev(oldtip)
391 partial = self._ubranchcache
391 partial = self._ubranchcache
392
392
393 self._branchtags(partial, lrev)
393 self._branchtags(partial, lrev)
394 # this private cache holds all heads (not just tips)
394 # this private cache holds all heads (not just tips)
395 self._ubranchcache = partial
395 self._ubranchcache = partial
396
396
397 # the branch cache is stored on disk as UTF-8, but in the local
397 # the branch cache is stored on disk as UTF-8, but in the local
398 # charset internally
398 # charset internally
399 for k, v in partial.iteritems():
399 for k, v in partial.iteritems():
400 self.branchcache[encoding.tolocal(k)] = v
400 self.branchcache[encoding.tolocal(k)] = v
401 return self.branchcache
401 return self.branchcache
402
402
403
403
404 def branchtags(self):
404 def branchtags(self):
405 '''return a dict where branch names map to the tipmost head of
405 '''return a dict where branch names map to the tipmost head of
406 the branch, open heads come before closed'''
406 the branch, open heads come before closed'''
407 bt = {}
407 bt = {}
408 for bn, heads in self._branchheads().iteritems():
408 for bn, heads in self._branchheads().iteritems():
409 head = None
409 head = None
410 for i in range(len(heads)-1, -1, -1):
410 for i in range(len(heads)-1, -1, -1):
411 h = heads[i]
411 h = heads[i]
412 if 'close' not in self.changelog.read(h)[5]:
412 if 'close' not in self.changelog.read(h)[5]:
413 head = h
413 head = h
414 break
414 break
415 # no open heads were found
415 # no open heads were found
416 if head is None:
416 if head is None:
417 head = heads[-1]
417 head = heads[-1]
418 bt[bn] = head
418 bt[bn] = head
419 return bt
419 return bt
420
420
421
421
422 def _readbranchcache(self):
422 def _readbranchcache(self):
423 partial = {}
423 partial = {}
424 try:
424 try:
425 f = self.opener("branchheads.cache")
425 f = self.opener("branchheads.cache")
426 lines = f.read().split('\n')
426 lines = f.read().split('\n')
427 f.close()
427 f.close()
428 except (IOError, OSError):
428 except (IOError, OSError):
429 return {}, nullid, nullrev
429 return {}, nullid, nullrev
430
430
431 try:
431 try:
432 last, lrev = lines.pop(0).split(" ", 1)
432 last, lrev = lines.pop(0).split(" ", 1)
433 last, lrev = bin(last), int(lrev)
433 last, lrev = bin(last), int(lrev)
434 if lrev >= len(self) or self[lrev].node() != last:
434 if lrev >= len(self) or self[lrev].node() != last:
435 # invalidate the cache
435 # invalidate the cache
436 raise ValueError('invalidating branch cache (tip differs)')
436 raise ValueError('invalidating branch cache (tip differs)')
437 for l in lines:
437 for l in lines:
438 if not l: continue
438 if not l: continue
439 node, label = l.split(" ", 1)
439 node, label = l.split(" ", 1)
440 partial.setdefault(label.strip(), []).append(bin(node))
440 partial.setdefault(label.strip(), []).append(bin(node))
441 except KeyboardInterrupt:
441 except KeyboardInterrupt:
442 raise
442 raise
443 except Exception, inst:
443 except Exception, inst:
444 if self.ui.debugflag:
444 if self.ui.debugflag:
445 self.ui.warn(str(inst), '\n')
445 self.ui.warn(str(inst), '\n')
446 partial, last, lrev = {}, nullid, nullrev
446 partial, last, lrev = {}, nullid, nullrev
447 return partial, last, lrev
447 return partial, last, lrev
448
448
449 def _writebranchcache(self, branches, tip, tiprev):
449 def _writebranchcache(self, branches, tip, tiprev):
450 try:
450 try:
451 f = self.opener("branchheads.cache", "w", atomictemp=True)
451 f = self.opener("branchheads.cache", "w", atomictemp=True)
452 f.write("%s %s\n" % (hex(tip), tiprev))
452 f.write("%s %s\n" % (hex(tip), tiprev))
453 for label, nodes in branches.iteritems():
453 for label, nodes in branches.iteritems():
454 for node in nodes:
454 for node in nodes:
455 f.write("%s %s\n" % (hex(node), label))
455 f.write("%s %s\n" % (hex(node), label))
456 f.rename()
456 f.rename()
457 except (IOError, OSError):
457 except (IOError, OSError):
458 pass
458 pass
459
459
460 def _updatebranchcache(self, partial, start, end):
460 def _updatebranchcache(self, partial, start, end):
461 for r in xrange(start, end):
461 for r in xrange(start, end):
462 c = self[r]
462 c = self[r]
463 b = c.branch()
463 b = c.branch()
464 bheads = partial.setdefault(b, [])
464 bheads = partial.setdefault(b, [])
465 bheads.append(c.node())
465 bheads.append(c.node())
466 for p in c.parents():
466 for p in c.parents():
467 pn = p.node()
467 pn = p.node()
468 if pn in bheads:
468 if pn in bheads:
469 bheads.remove(pn)
469 bheads.remove(pn)
470
470
471 def lookup(self, key):
471 def lookup(self, key):
472 if isinstance(key, int):
472 if isinstance(key, int):
473 return self.changelog.node(key)
473 return self.changelog.node(key)
474 elif key == '.':
474 elif key == '.':
475 return self.dirstate.parents()[0]
475 return self.dirstate.parents()[0]
476 elif key == 'null':
476 elif key == 'null':
477 return nullid
477 return nullid
478 elif key == 'tip':
478 elif key == 'tip':
479 return self.changelog.tip()
479 return self.changelog.tip()
480 n = self.changelog._match(key)
480 n = self.changelog._match(key)
481 if n:
481 if n:
482 return n
482 return n
483 if key in self.tags():
483 if key in self.tags():
484 return self.tags()[key]
484 return self.tags()[key]
485 if key in self.branchtags():
485 if key in self.branchtags():
486 return self.branchtags()[key]
486 return self.branchtags()[key]
487 n = self.changelog._partialmatch(key)
487 n = self.changelog._partialmatch(key)
488 if n:
488 if n:
489 return n
489 return n
490 try:
490 try:
491 if len(key) == 20:
491 if len(key) == 20:
492 key = hex(key)
492 key = hex(key)
493 except:
493 except:
494 pass
494 pass
495 raise error.RepoError(_("unknown revision '%s'") % key)
495 raise error.RepoError(_("unknown revision '%s'") % key)
496
496
497 def local(self):
497 def local(self):
498 return True
498 return True
499
499
500 def join(self, f):
500 def join(self, f):
501 return os.path.join(self.path, f)
501 return os.path.join(self.path, f)
502
502
503 def wjoin(self, f):
503 def wjoin(self, f):
504 return os.path.join(self.root, f)
504 return os.path.join(self.root, f)
505
505
506 def rjoin(self, f):
506 def rjoin(self, f):
507 return os.path.join(self.root, util.pconvert(f))
507 return os.path.join(self.root, util.pconvert(f))
508
508
509 def file(self, f):
509 def file(self, f):
510 if f[0] == '/':
510 if f[0] == '/':
511 f = f[1:]
511 f = f[1:]
512 return filelog.filelog(self.sopener, f)
512 return filelog.filelog(self.sopener, f)
513
513
514 def changectx(self, changeid):
514 def changectx(self, changeid):
515 return self[changeid]
515 return self[changeid]
516
516
517 def parents(self, changeid=None):
517 def parents(self, changeid=None):
518 '''get list of changectxs for parents of changeid'''
518 '''get list of changectxs for parents of changeid'''
519 return self[changeid].parents()
519 return self[changeid].parents()
520
520
521 def filectx(self, path, changeid=None, fileid=None):
521 def filectx(self, path, changeid=None, fileid=None):
522 """changeid can be a changeset revision, node, or tag.
522 """changeid can be a changeset revision, node, or tag.
523 fileid can be a file revision or node."""
523 fileid can be a file revision or node."""
524 return context.filectx(self, path, changeid, fileid)
524 return context.filectx(self, path, changeid, fileid)
525
525
526 def getcwd(self):
526 def getcwd(self):
527 return self.dirstate.getcwd()
527 return self.dirstate.getcwd()
528
528
529 def pathto(self, f, cwd=None):
529 def pathto(self, f, cwd=None):
530 return self.dirstate.pathto(f, cwd)
530 return self.dirstate.pathto(f, cwd)
531
531
532 def wfile(self, f, mode='r'):
532 def wfile(self, f, mode='r'):
533 return self.wopener(f, mode)
533 return self.wopener(f, mode)
534
534
535 def _link(self, f):
535 def _link(self, f):
536 return os.path.islink(self.wjoin(f))
536 return os.path.islink(self.wjoin(f))
537
537
538 def _filter(self, filter, filename, data):
538 def _filter(self, filter, filename, data):
539 if filter not in self.filterpats:
539 if filter not in self.filterpats:
540 l = []
540 l = []
541 for pat, cmd in self.ui.configitems(filter):
541 for pat, cmd in self.ui.configitems(filter):
542 if cmd == '!':
542 if cmd == '!':
543 continue
543 continue
544 mf = util.matcher(self.root, "", [pat], [], [])[1]
544 mf = util.matcher(self.root, "", [pat], [], [])[1]
545 fn = None
545 fn = None
546 params = cmd
546 params = cmd
547 for name, filterfn in self._datafilters.iteritems():
547 for name, filterfn in self._datafilters.iteritems():
548 if cmd.startswith(name):
548 if cmd.startswith(name):
549 fn = filterfn
549 fn = filterfn
550 params = cmd[len(name):].lstrip()
550 params = cmd[len(name):].lstrip()
551 break
551 break
552 if not fn:
552 if not fn:
553 fn = lambda s, c, **kwargs: util.filter(s, c)
553 fn = lambda s, c, **kwargs: util.filter(s, c)
554 # Wrap old filters not supporting keyword arguments
554 # Wrap old filters not supporting keyword arguments
555 if not inspect.getargspec(fn)[2]:
555 if not inspect.getargspec(fn)[2]:
556 oldfn = fn
556 oldfn = fn
557 fn = lambda s, c, **kwargs: oldfn(s, c)
557 fn = lambda s, c, **kwargs: oldfn(s, c)
558 l.append((mf, fn, params))
558 l.append((mf, fn, params))
559 self.filterpats[filter] = l
559 self.filterpats[filter] = l
560
560
561 for mf, fn, cmd in self.filterpats[filter]:
561 for mf, fn, cmd in self.filterpats[filter]:
562 if mf(filename):
562 if mf(filename):
563 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
563 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
564 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
564 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
565 break
565 break
566
566
567 return data
567 return data
568
568
569 def adddatafilter(self, name, filter):
569 def adddatafilter(self, name, filter):
570 self._datafilters[name] = filter
570 self._datafilters[name] = filter
571
571
572 def wread(self, filename):
572 def wread(self, filename):
573 if self._link(filename):
573 if self._link(filename):
574 data = os.readlink(self.wjoin(filename))
574 data = os.readlink(self.wjoin(filename))
575 else:
575 else:
576 data = self.wopener(filename, 'r').read()
576 data = self.wopener(filename, 'r').read()
577 return self._filter("encode", filename, data)
577 return self._filter("encode", filename, data)
578
578
579 def wwrite(self, filename, data, flags):
579 def wwrite(self, filename, data, flags):
580 data = self._filter("decode", filename, data)
580 data = self._filter("decode", filename, data)
581 try:
581 try:
582 os.unlink(self.wjoin(filename))
582 os.unlink(self.wjoin(filename))
583 except OSError:
583 except OSError:
584 pass
584 pass
585 if 'l' in flags:
585 if 'l' in flags:
586 self.wopener.symlink(data, filename)
586 self.wopener.symlink(data, filename)
587 else:
587 else:
588 self.wopener(filename, 'w').write(data)
588 self.wopener(filename, 'w').write(data)
589 if 'x' in flags:
589 if 'x' in flags:
590 util.set_flags(self.wjoin(filename), False, True)
590 util.set_flags(self.wjoin(filename), False, True)
591
591
592 def wwritedata(self, filename, data):
592 def wwritedata(self, filename, data):
593 return self._filter("decode", filename, data)
593 return self._filter("decode", filename, data)
594
594
595 def transaction(self):
595 def transaction(self):
596 tr = self._transref and self._transref() or None
596 tr = self._transref and self._transref() or None
597 if tr and tr.running():
597 if tr and tr.running():
598 return tr.nest()
598 return tr.nest()
599
599
600 # abort here if the journal already exists
600 # abort here if the journal already exists
601 if os.path.exists(self.sjoin("journal")):
601 if os.path.exists(self.sjoin("journal")):
602 raise error.RepoError(_("journal already exists - run hg recover"))
602 raise error.RepoError(_("journal already exists - run hg recover"))
603
603
604 # save dirstate for rollback
604 # save dirstate for rollback
605 try:
605 try:
606 ds = self.opener("dirstate").read()
606 ds = self.opener("dirstate").read()
607 except IOError:
607 except IOError:
608 ds = ""
608 ds = ""
609 self.opener("journal.dirstate", "w").write(ds)
609 self.opener("journal.dirstate", "w").write(ds)
610 self.opener("journal.branch", "w").write(self.dirstate.branch())
610 self.opener("journal.branch", "w").write(self.dirstate.branch())
611
611
612 renames = [(self.sjoin("journal"), self.sjoin("undo")),
612 renames = [(self.sjoin("journal"), self.sjoin("undo")),
613 (self.join("journal.dirstate"), self.join("undo.dirstate")),
613 (self.join("journal.dirstate"), self.join("undo.dirstate")),
614 (self.join("journal.branch"), self.join("undo.branch"))]
614 (self.join("journal.branch"), self.join("undo.branch"))]
615 tr = transaction.transaction(self.ui.warn, self.sopener,
615 tr = transaction.transaction(self.ui.warn, self.sopener,
616 self.sjoin("journal"),
616 self.sjoin("journal"),
617 aftertrans(renames),
617 aftertrans(renames),
618 self.store.createmode)
618 self.store.createmode)
619 self._transref = weakref.ref(tr)
619 self._transref = weakref.ref(tr)
620 return tr
620 return tr
621
621
622 def recover(self):
622 def recover(self):
623 lock = self.lock()
623 lock = self.lock()
624 try:
624 try:
625 if os.path.exists(self.sjoin("journal")):
625 if os.path.exists(self.sjoin("journal")):
626 self.ui.status(_("rolling back interrupted transaction\n"))
626 self.ui.status(_("rolling back interrupted transaction\n"))
627 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
627 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
628 self.invalidate()
628 self.invalidate()
629 return True
629 return True
630 else:
630 else:
631 self.ui.warn(_("no interrupted transaction available\n"))
631 self.ui.warn(_("no interrupted transaction available\n"))
632 return False
632 return False
633 finally:
633 finally:
634 lock.release()
634 lock.release()
635
635
636 def rollback(self):
636 def rollback(self):
637 wlock = lock = None
637 wlock = lock = None
638 try:
638 try:
639 wlock = self.wlock()
639 wlock = self.wlock()
640 lock = self.lock()
640 lock = self.lock()
641 if os.path.exists(self.sjoin("undo")):
641 if os.path.exists(self.sjoin("undo")):
642 self.ui.status(_("rolling back last transaction\n"))
642 self.ui.status(_("rolling back last transaction\n"))
643 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
643 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
645 try:
645 try:
646 branch = self.opener("undo.branch").read()
646 branch = self.opener("undo.branch").read()
647 self.dirstate.setbranch(branch)
647 self.dirstate.setbranch(branch)
648 except IOError:
648 except IOError:
649 self.ui.warn(_("Named branch could not be reset, "
649 self.ui.warn(_("Named branch could not be reset, "
650 "current branch still is: %s\n")
650 "current branch still is: %s\n")
651 % encoding.tolocal(self.dirstate.branch()))
651 % encoding.tolocal(self.dirstate.branch()))
652 self.invalidate()
652 self.invalidate()
653 self.dirstate.invalidate()
653 self.dirstate.invalidate()
654 else:
654 else:
655 self.ui.warn(_("no rollback information available\n"))
655 self.ui.warn(_("no rollback information available\n"))
656 finally:
656 finally:
657 release(lock, wlock)
657 release(lock, wlock)
658
658
659 def invalidate(self):
659 def invalidate(self):
660 for a in "changelog manifest".split():
660 for a in "changelog manifest".split():
661 if a in self.__dict__:
661 if a in self.__dict__:
662 delattr(self, a)
662 delattr(self, a)
663 self.tagscache = None
663 self.tagscache = None
664 self._tagstypecache = None
664 self._tagstypecache = None
665 self.nodetagscache = None
665 self.nodetagscache = None
666 self.branchcache = None
666 self.branchcache = None
667 self._ubranchcache = None
667 self._ubranchcache = None
668 self._branchcachetip = None
668 self._branchcachetip = None
669
669
670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
671 try:
671 try:
672 l = lock.lock(lockname, 0, releasefn, desc=desc)
672 l = lock.lock(lockname, 0, releasefn, desc=desc)
673 except error.LockHeld, inst:
673 except error.LockHeld, inst:
674 if not wait:
674 if not wait:
675 raise
675 raise
676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
677 (desc, inst.locker))
677 (desc, inst.locker))
678 # default to 600 seconds timeout
678 # default to 600 seconds timeout
679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
680 releasefn, desc=desc)
680 releasefn, desc=desc)
681 if acquirefn:
681 if acquirefn:
682 acquirefn()
682 acquirefn()
683 return l
683 return l
684
684
685 def lock(self, wait=True):
685 def lock(self, wait=True):
686 l = self._lockref and self._lockref()
686 l = self._lockref and self._lockref()
687 if l is not None and l.held:
687 if l is not None and l.held:
688 l.lock()
688 l.lock()
689 return l
689 return l
690
690
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
692 _('repository %s') % self.origroot)
692 _('repository %s') % self.origroot)
693 self._lockref = weakref.ref(l)
693 self._lockref = weakref.ref(l)
694 return l
694 return l
695
695
696 def wlock(self, wait=True):
696 def wlock(self, wait=True):
697 l = self._wlockref and self._wlockref()
697 l = self._wlockref and self._wlockref()
698 if l is not None and l.held:
698 if l is not None and l.held:
699 l.lock()
699 l.lock()
700 return l
700 return l
701
701
702 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
702 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
703 self.dirstate.invalidate, _('working directory of %s') %
703 self.dirstate.invalidate, _('working directory of %s') %
704 self.origroot)
704 self.origroot)
705 self._wlockref = weakref.ref(l)
705 self._wlockref = weakref.ref(l)
706 return l
706 return l
707
707
708 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
708 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
709 """
709 """
710 commit an individual file as part of a larger transaction
710 commit an individual file as part of a larger transaction
711 """
711 """
712
712
713 fname = fctx.path()
713 fname = fctx.path()
714 text = fctx.data()
714 text = fctx.data()
715 flog = self.file(fname)
715 flog = self.file(fname)
716 fparent1 = manifest1.get(fname, nullid)
716 fparent1 = manifest1.get(fname, nullid)
717 fparent2 = manifest2.get(fname, nullid)
717 fparent2 = manifest2.get(fname, nullid)
718
718
719 meta = {}
719 meta = {}
720 copy = fctx.renamed()
720 copy = fctx.renamed()
721 if copy and copy[0] != fname:
721 if copy and copy[0] != fname:
722 # Mark the new revision of this file as a copy of another
722 # Mark the new revision of this file as a copy of another
723 # file. This copy data will effectively act as a parent
723 # file. This copy data will effectively act as a parent
724 # of this new revision. If this is a merge, the first
724 # of this new revision. If this is a merge, the first
725 # parent will be the nullid (meaning "look up the copy data")
725 # parent will be the nullid (meaning "look up the copy data")
726 # and the second one will be the other parent. For example:
726 # and the second one will be the other parent. For example:
727 #
727 #
728 # 0 --- 1 --- 3 rev1 changes file foo
728 # 0 --- 1 --- 3 rev1 changes file foo
729 # \ / rev2 renames foo to bar and changes it
729 # \ / rev2 renames foo to bar and changes it
730 # \- 2 -/ rev3 should have bar with all changes and
730 # \- 2 -/ rev3 should have bar with all changes and
731 # should record that bar descends from
731 # should record that bar descends from
732 # bar in rev2 and foo in rev1
732 # bar in rev2 and foo in rev1
733 #
733 #
734 # this allows this merge to succeed:
734 # this allows this merge to succeed:
735 #
735 #
736 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
736 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
737 # \ / merging rev3 and rev4 should use bar@rev2
737 # \ / merging rev3 and rev4 should use bar@rev2
738 # \- 2 --- 4 as the merge base
738 # \- 2 --- 4 as the merge base
739 #
739 #
740
740
741 cfname = copy[0]
741 cfname = copy[0]
742 crev = manifest1.get(cfname)
742 crev = manifest1.get(cfname)
743 newfparent = fparent2
743 newfparent = fparent2
744
744
745 if manifest2: # branch merge
745 if manifest2: # branch merge
746 if fparent2 == nullid or crev is None: # copied on remote side
746 if fparent2 == nullid or crev is None: # copied on remote side
747 if cfname in manifest2:
747 if cfname in manifest2:
748 crev = manifest2[cfname]
748 crev = manifest2[cfname]
749 newfparent = fparent1
749 newfparent = fparent1
750
750
751 # find source in nearest ancestor if we've lost track
751 # find source in nearest ancestor if we've lost track
752 if not crev:
752 if not crev:
753 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
753 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
754 (fname, cfname))
754 (fname, cfname))
755 for ancestor in self['.'].ancestors():
755 for ancestor in self['.'].ancestors():
756 if cfname in ancestor:
756 if cfname in ancestor:
757 crev = ancestor[cfname].filenode()
757 crev = ancestor[cfname].filenode()
758 break
758 break
759
759
760 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
760 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
761 meta["copy"] = cfname
761 meta["copy"] = cfname
762 meta["copyrev"] = hex(crev)
762 meta["copyrev"] = hex(crev)
763 fparent1, fparent2 = nullid, newfparent
763 fparent1, fparent2 = nullid, newfparent
764 elif fparent2 != nullid:
764 elif fparent2 != nullid:
765 # is one parent an ancestor of the other?
765 # is one parent an ancestor of the other?
766 fparentancestor = flog.ancestor(fparent1, fparent2)
766 fparentancestor = flog.ancestor(fparent1, fparent2)
767 if fparentancestor == fparent1:
767 if fparentancestor == fparent1:
768 fparent1, fparent2 = fparent2, nullid
768 fparent1, fparent2 = fparent2, nullid
769 elif fparentancestor == fparent2:
769 elif fparentancestor == fparent2:
770 fparent2 = nullid
770 fparent2 = nullid
771
771
772 # is the file unmodified from the parent? report existing entry
772 # is the file unmodified from the parent? report existing entry
773 if fparent2 == nullid and not flog.cmp(fparent1, text) and not meta:
773 if fparent2 == nullid and not flog.cmp(fparent1, text) and not meta:
774 return fparent1
774 return fparent1
775
775
776 changelist.append(fname)
776 changelist.append(fname)
777 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
777 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
778
778
779 def commit(self, files=None, text="", user=None, date=None,
779 def commit(self, files=None, text="", user=None, date=None,
780 match=None, force=False, force_editor=False,
780 match=None, force=False, force_editor=False,
781 p1=None, p2=None, extra={}, empty_ok=False):
781 p1=None, p2=None, extra={}, empty_ok=False):
782 wlock = lock = None
782 wlock = lock = None
783 if extra.get("close"):
783 if extra.get("close"):
784 force = True
784 force = True
785 if files:
785 if files:
786 files = list(set(files))
786 files = list(set(files))
787 try:
787 try:
788 wlock = self.wlock()
788 wlock = self.wlock()
789 lock = self.lock()
789 lock = self.lock()
790
790
791 p1, p2 = self.dirstate.parents()
791 p1, p2 = self.dirstate.parents()
792
792
793 if (not force and p2 != nullid and
793 if (not force and p2 != nullid and
794 (match and (match.files() or match.anypats()))):
794 (match and (match.files() or match.anypats()))):
795 raise util.Abort(_('cannot partially commit a merge '
795 raise util.Abort(_('cannot partially commit a merge '
796 '(do not specify files or patterns)'))
796 '(do not specify files or patterns)'))
797
797
798 if files:
798 if files:
799 modified, removed = [], []
799 modified, removed = [], []
800 for f in files:
800 for f in files:
801 s = self.dirstate[f]
801 s = self.dirstate[f]
802 if s in 'nma':
802 if s in 'nma':
803 modified.append(f)
803 modified.append(f)
804 elif s == 'r':
804 elif s == 'r':
805 removed.append(f)
805 removed.append(f)
806 else:
806 else:
807 self.ui.warn(_("%s not tracked!\n") % f)
807 self.ui.warn(_("%s not tracked!\n") % f)
808 changes = [modified, [], removed, [], []]
808 changes = [modified, [], removed, [], []]
809 else:
809 else:
810 changes = self.status(match=match)
810 changes = self.status(match=match)
811
811
812 ms = merge_.mergestate(self)
812 ms = merge_.mergestate(self)
813 for f in changes[0]:
813 for f in changes[0]:
814 if f in ms and ms[f] == 'u':
814 if f in ms and ms[f] == 'u':
815 raise util.Abort(_("unresolved merge conflicts "
815 raise util.Abort(_("unresolved merge conflicts "
816 "(see hg resolve)"))
816 "(see hg resolve)"))
817 wctx = context.workingctx(self, (p1, p2), text, user, date,
817 wctx = context.workingctx(self, (p1, p2), text, user, date,
818 extra, changes)
818 extra, changes)
819 r = self._commitctx(wctx, force, force_editor, empty_ok,
819 r = self._commitctx(wctx, force, force_editor, empty_ok,
820 True, True)
820 True, True)
821 ms.reset()
821 ms.reset()
822 return r
822 return r
823
823
824 finally:
824 finally:
825 release(lock, wlock)
825 release(lock, wlock)
826
826
827 def commitctx(self, ctx):
827 def commitctx(self, ctx):
828 """Add a new revision to current repository.
828 """Add a new revision to current repository.
829
829
830 Revision information is passed in the context.memctx argument.
830 Revision information is passed in the context.memctx argument.
831 commitctx() does not touch the working directory.
831 commitctx() does not touch the working directory.
832 """
832 """
833 wlock = lock = None
833 lock = self.lock()
834 try:
834 try:
835 wlock = self.wlock()
836 lock = self.lock()
837 return self._commitctx(ctx, force=True, force_editor=False,
835 return self._commitctx(ctx, force=True, force_editor=False,
838 empty_ok=True, use_dirstate=False,
836 empty_ok=True, use_dirstate=False,
839 update_dirstate=False)
837 update_dirstate=False)
840 finally:
838 finally:
841 release(lock, wlock)
839 lock.release()
842
840
843 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
841 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
844 use_dirstate=True, update_dirstate=True):
842 use_dirstate=True, update_dirstate=True):
845 tr = None
843 tr = None
846 valid = 0 # don't save the dirstate if this isn't set
844 valid = 0 # don't save the dirstate if this isn't set
847 try:
845 try:
848 commit = sorted(wctx.modified() + wctx.added())
846 commit = sorted(wctx.modified() + wctx.added())
849 remove = wctx.removed()
847 remove = wctx.removed()
850 extra = wctx.extra().copy()
848 extra = wctx.extra().copy()
851 branchname = extra['branch']
849 branchname = extra['branch']
852 user = wctx.user()
850 user = wctx.user()
853 text = wctx.description()
851 text = wctx.description()
854
852
855 p1, p2 = [p.node() for p in wctx.parents()]
853 p1, p2 = [p.node() for p in wctx.parents()]
856 c1 = self.changelog.read(p1)
854 c1 = self.changelog.read(p1)
857 c2 = self.changelog.read(p2)
855 c2 = self.changelog.read(p2)
858 m1 = self.manifest.read(c1[0]).copy()
856 m1 = self.manifest.read(c1[0]).copy()
859 m2 = self.manifest.read(c2[0])
857 m2 = self.manifest.read(c2[0])
860
858
861 if use_dirstate:
859 if use_dirstate:
862 oldname = c1[5].get("branch") # stored in UTF-8
860 oldname = c1[5].get("branch") # stored in UTF-8
863 if (not commit and not remove and not force and p2 == nullid
861 if (not commit and not remove and not force and p2 == nullid
864 and branchname == oldname):
862 and branchname == oldname):
865 self.ui.status(_("nothing changed\n"))
863 self.ui.status(_("nothing changed\n"))
866 return None
864 return None
867
865
868 xp1 = hex(p1)
866 xp1 = hex(p1)
869 if p2 == nullid: xp2 = ''
867 if p2 == nullid: xp2 = ''
870 else: xp2 = hex(p2)
868 else: xp2 = hex(p2)
871
869
872 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
870 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
873
871
874 tr = self.transaction()
872 tr = self.transaction()
875 trp = weakref.proxy(tr)
873 trp = weakref.proxy(tr)
876
874
877 # check in files
875 # check in files
878 new = {}
876 new = {}
879 changed = []
877 changed = []
880 linkrev = len(self)
878 linkrev = len(self)
881 for f in commit:
879 for f in commit:
882 self.ui.note(f + "\n")
880 self.ui.note(f + "\n")
883 try:
881 try:
884 fctx = wctx.filectx(f)
882 fctx = wctx.filectx(f)
885 newflags = fctx.flags()
883 newflags = fctx.flags()
886 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
884 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
887 if ((not changed or changed[-1] != f) and
885 if ((not changed or changed[-1] != f) and
888 m2.get(f) != new[f]):
886 m2.get(f) != new[f]):
889 # mention the file in the changelog if some
887 # mention the file in the changelog if some
890 # flag changed, even if there was no content
888 # flag changed, even if there was no content
891 # change.
889 # change.
892 if m1.flags(f) != newflags:
890 if m1.flags(f) != newflags:
893 changed.append(f)
891 changed.append(f)
894 m1.set(f, newflags)
892 m1.set(f, newflags)
895 if use_dirstate:
893 if use_dirstate:
896 self.dirstate.normal(f)
894 self.dirstate.normal(f)
897
895
898 except (OSError, IOError):
896 except (OSError, IOError):
899 if use_dirstate:
897 if use_dirstate:
900 self.ui.warn(_("trouble committing %s!\n") % f)
898 self.ui.warn(_("trouble committing %s!\n") % f)
901 raise
899 raise
902 else:
900 else:
903 remove.append(f)
901 remove.append(f)
904
902
905 updated, added = [], []
903 updated, added = [], []
906 for f in sorted(changed):
904 for f in sorted(changed):
907 if f in m1 or f in m2:
905 if f in m1 or f in m2:
908 updated.append(f)
906 updated.append(f)
909 else:
907 else:
910 added.append(f)
908 added.append(f)
911
909
912 # update manifest
910 # update manifest
913 m1.update(new)
911 m1.update(new)
914 removed = [f for f in sorted(remove) if f in m1 or f in m2]
912 removed = [f for f in sorted(remove) if f in m1 or f in m2]
915 removed1 = []
913 removed1 = []
916
914
917 for f in removed:
915 for f in removed:
918 if f in m1:
916 if f in m1:
919 del m1[f]
917 del m1[f]
920 removed1.append(f)
918 removed1.append(f)
921 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
919 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
922 (new, removed1))
920 (new, removed1))
923
921
924 # add changeset
922 # add changeset
925 if (not empty_ok and not text) or force_editor:
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append(_("HG: user: %s") % user)
                if p2 != nullid:
                    edittext.append(_("HG: branch merge"))
                if branchname:
                    edittext.append(_("HG: branch '%s'")
                                    % encoding.tolocal(branchname))
                edittext.extend([_("HG: added %s") % f for f in added])
                edittext.extend([_("HG: changed %s") % f for f in updated])
                edittext.extend([_("HG: removed %s") % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append(_("HG: no files changed"))
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

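    # Note on the pretxncommit hook above (an illustrative sketch, not new
    # API): the 'pending' callable defers changelog.writepending() until a
    # hook actually runs, so the hook process can see the in-progress commit:
    #
    #   p = lambda: self.changelog.writepending() and self.root or ""
    #   # evaluates to self.root only when pending data had to be written
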
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

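    # Illustrative usage sketch for status() (hypothetical caller code):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #
    # With the defaults, the first dirstate parent ('.') is compared against
    # the working directory, and the ignored/clean/unknown lists stay empty
    # unless the corresponding flags are passed.
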
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            release(wlock)

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

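    # Worked micro-example of the sort above: heads at revs [3, 10, 7] become
    # [(-3, a), (-10, b), (-7, c)]; sorted() yields [(-10, b), (-7, c), (-3, a)],
    # i.e. nodes in descending revision order without a custom comparator.
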
    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

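    # Illustrative shape of the return value above: each entry is a 4-tuple
    #
    #   (head, root, first parent of root, second parent of root)
    #
    # describing one linear segment of history; findcommonincoming() consumes
    # exactly this shape from remote.branches().
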
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

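    # Illustrative sketch of the sampling above: for a pair (top, bottom) on a
    # linear history, the inner loop keeps the ancestors of 'top' at distances
    # 1, 2, 4, 8, ... (the points where i == f while f doubles), e.g.:
    #
    #   samples = repo.between([(tip, nullid)])[0]
    #
    # This exponentially spaced list is what lets the discovery code below
    # narrow down a branch boundary in O(log n) round trips.
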
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

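    # Rough walk-through of the discovery above (hypothetical nodes): for a
    # remote head H that is unknown locally,
    #
    #   remote.branches([H])  ->  [(H, R, P1, P2)]   # linear segment H..R
    #
    # If P1 and P2 are known locally, R is the earliest unknown changeset and
    # goes straight into 'fetch'; if only the segment base is known, the
    # segment is appended to 'search' and narrowed via remote.between(),
    # using the exponentially spaced samples like a binary search.
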
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

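    # Sketch of how prepush() below uses this (a simplified view): the common
    # nodes accumulated in 'base' are pruned from the local DAG, and the roots
    # of what remains are handed to the changegroup builders, e.g.:
    #
    #   update, updated_heads = repo.findoutgoing(remote, common, remote_heads)
    #   cg = repo.changegroupsubset(update, revs, 'push')
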
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

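    # Worked example of the head check above (hypothetical history): if the
    # remote has one head but the outgoing changesets end in two local heads
    # that don't descend from each other, newheads grows to 2 while
    # remote_heads has length 1, so the push aborts with "push creates new
    # remote heads!" unless force is set.
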
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

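        # Illustrative extranodes shape (hypothetical values):
        #
        #   extranodes = {
        #       'foo.txt': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 stands for the manifest
        #   }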
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

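        # Performance note (illustrative): gengroup() below sorts the manifest
        # nodes by revision before grouping them, so consecutive revisions are
        # the common case and the readdelta() branch above usually runs,
        # scanning only the changed manifest entries instead of a full read.
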
        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
1889 lookup_filenode_link_func(fname))
1887 lookup_filenode_link_func(fname))
1890 for chnk in group:
1888 for chnk in group:
1891 yield chnk
1889 yield chnk
1892 if fname in msng_filenode_set:
1890 if fname in msng_filenode_set:
1893 # Don't need this anymore, toss it to free memory.
1891 # Don't need this anymore, toss it to free memory.
1894 del msng_filenode_set[fname]
1892 del msng_filenode_set[fname]
1895 # Signal that no more groups are left.
1893 # Signal that no more groups are left.
1896 yield changegroup.closechunk()
1894 yield changegroup.closechunk()
1897
1895
1898 if msng_cl_lst:
1896 if msng_cl_lst:
1899 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1897 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1900
1898
1901 return util.chunkbuffer(gengroup())
1899 return util.chunkbuffer(gengroup())
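
    # The returned chunkbuffer behaves like a read-only file object, so a
    # caller can stream the changegroup out in fixed-size pieces.  A rough
    # usage sketch (write_to_peer is a hypothetical transport function):
    #
    #   cg = repo.changegroupsubset(bases, heads, 'push')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       write_to_peer(chunk)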

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
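
    # Presumably the race in issue1320: generating against a moving tip can
    # pick up changesets committed while the group is being built; pinning
    # the head set here keeps the outgoing group consistent.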

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
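
    # Both generators emit the same on-the-wire layout: a changelog group,
    # then a manifest group, then for each changed file a chunk carrying
    # the file name followed by its filelog group, terminated by a closing
    # chunk.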

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1 + added heads (2..n)
        - fewer heads than before: -1 - removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
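
    # Worked example of the return convention: with oldheads == 1 and
    # newheads == 3 the call returns +3 (two heads added); with
    # oldheads == 3 and newheads == 1 it returns -3; if the head count is
    # unchanged it returns 1, so a successful run never reports 0 from here.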

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
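
    # The stream protocol as consumed above: one status line (an integer
    # result code), one "total_files total_bytes" line, then for each file
    # a "name\0size" header line followed by exactly size bytes of raw
    # store data.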

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
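
    # A streaming clone copies raw store files verbatim via stream_in(),
    # while pull() transfers a changegroup; asking for specific heads
    # forces the pull path because the stream always carries the whole
    # store.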

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
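
# A hedged usage sketch: transaction code can run the returned callback
# after a successful close to publish journal files, e.g.
#
#   cb = aftertrans([('journal', 'undo')])
#   cb()  # renames each (src, dest) pair with util.rename
#
# the concrete file names above are illustrative, not taken from this
# section.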

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True