tags: fold in _hgtagsnodes
Matt Mackall
r8850:9db1c8e1 default
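
This changeset folds the former _hgtagsnodes() method into tags() as a local
tagnodes() helper; the scanning logic itself is unchanged. For reference, a
minimal standalone sketch (not part of the changeset) of the dedup pass that
helper performs: heads are visited oldest-first, and when several heads carry
the same .hgtags filenode, only the most recently visited one survives. The
plain (rev, node, fnode) tuples below are hypothetical stand-ins for the real
changectx-derived values.

    # hypothetical standalone version of the tagnodes()/_hgtagsnodes dedup
    def dedup_tagnodes(entries):
        last = {}  # fnode -> index of the entry that currently wins
        ret = []
        for rev, node, fnode in entries:
            ret.append((rev, node, fnode))
            if fnode in last:
                # an older head carried the same .hgtags revision; drop it
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    # heads 1 and 3 share fnode 'a', so only head 3 survives:
    print(dedup_tagnodes([(1, 'n1', 'a'), (2, 'n2', 'b'), (3, 'n3', 'a')]))
    # -> [(2, 'n2', 'b'), (3, 'n3', 'a')]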
@@ -1,2179 +1,2179 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo
 import changelog, dirstate, filelog, manifest, context
 import lock, transaction, store, encoding
 import util, extensions, hook, error
 import match as match_
 import merge as merge_
 from lock import release
 import weakref, stat, errno, os, time, inspect
 propertycache = util.propertycache

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
     supported = set('revlogv1 store fncache shared'.split())

     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener("requires").read().splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RepoError(_("requirement '%s' not supported") % r)

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener("sharedpath").read())
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s' % s))
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, util.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode

         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self._ubranchcache = None # UTF-8 version of branchcache
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         self.sopener.defversion = c.version
         return c

     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)

     @propertycache
     def dirstate(self):
         return dirstate.dirstate(self.opener, self.ui, self.root)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         for i in xrange(len(self)):
             yield i

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagstypecache and name in self._tagstypecache:
                     old = self.tagscache.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError:
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         if '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])

         m = match_.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     def tags(self):
         '''return a mapping of tag to node'''
         if self.tagscache:
             return self.tagscache

         globaltags = {}
         tagtypes = {}

         def readtags(lines, fn, tagtype):
             filetags = {}
             count = 0

             def warn(msg):
                 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

             for l in lines:
                 count += 1
                 if not l:
                     continue
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     warn(_("cannot parse entry"))
                     continue
                 node, key = s
                 key = encoding.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     warn(_("node '%s' is not well formed") % node)
                     continue
                 if bin_n not in self.changelog.nodemap:
                     warn(_("tag '%s' refers to unknown node") % key)
                     continue

                 h = []
                 if key in filetags:
                     n, h = filetags[key]
                     h.append(n)
                 filetags[key] = (bin_n, h)

             for k, nh in filetags.iteritems():
                 if k not in globaltags:
                     globaltags[k] = nh
                     tagtypes[k] = tagtype
                     continue

                 # we prefer the global tag if:
                 #  it supercedes us OR
                 #  mutual supercedes and it has a higher rank
                 # otherwise we win because we're tip-most
                 an, ah = nh
                 bn, bh = globaltags[k]
                 if (bn != an and an in bh and
                     (bn not in ah or len(bh) > len(ah))):
                     an = bn
                 ah.extend([n for n in bh if n not in ah])
                 globaltags[k] = an, ah
                 tagtypes[k] = tagtype

+        def tagnodes():
+            last = {}
+            ret = []
+            for node in reversed(self.heads()):
+                c = self[node]
+                rev = c.rev()
+                try:
+                    fnode = c.filenode('.hgtags')
+                except error.LookupError:
+                    continue
+                ret.append((rev, node, fnode))
+                if fnode in last:
+                    ret[last[fnode]] = None
+                last[fnode] = len(ret) - 1
+            return [item for item in ret if item]
+
         # read the tags file from each head, ending with the tip
         f = None
-        for rev, node, fnode in self._hgtagsnodes():
+        for rev, node, fnode in tagnodes():
             f = (f and f.filectx(fnode) or
                  self.filectx('.hgtags', fileid=fnode))
             readtags(f.data().splitlines(), f, "global")

         try:
             data = encoding.fromlocal(self.opener("localtags").read())
             # localtags are stored in the local character set
             # while the internal tag table is stored in UTF-8
             readtags(data.splitlines(), "localtags", "local")
         except IOError:
             pass

         self.tagscache = {}
         self._tagstypecache = {}
         for k, nh in globaltags.iteritems():
             n = nh[0]
             if n != nullid:
                 self.tagscache[k] = n
             self._tagstypecache[k] = tagtypes[k]
         self.tagscache['tip'] = self.changelog.tip()
         return self.tagscache

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         self.tags()

         return self._tagstypecache.get(tagname)

-    def _hgtagsnodes(self):
-        last = {}
-        ret = []
-        for node in reversed(self.heads()):
-            c = self[node]
-            rev = c.rev()
-            try:
-                fnode = c.filenode('.hgtags')
-            except error.LookupError:
-                continue
-            ret.append((rev, node, fnode))
-            if fnode in last:
-                ret[last[fnode]] = None
-            last[fnode] = len(ret) - 1
-        return [item for item in ret if item]
-
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])

     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def branchmap(self):
         tip = self.changelog.tip()
         if self.branchcache is not None and self._branchcachetip == tip:
             return self.branchcache

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if self.branchcache is None:
             self.branchcache = {} # avoid recursion in changectx
         else:
             self.branchcache.clear() # keep using the same dict
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._ubranchcache

         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._ubranchcache = partial

         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.iteritems():
             self.branchcache[encoding.tolocal(k)] = v
         return self.branchcache


     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             head = None
             for i in range(len(heads)-1, -1, -1):
                 h = heads[i]
                 if 'close' not in self.changelog.read(h)[5]:
                     head = h
                     break
             # no open heads were found
             if head is None:
                 head = heads[-1]
             bt[bn] = head
         return bt


     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branchheads.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev

         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial.setdefault(label.strip(), []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev

     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branchheads.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass

     def _updatebranchcache(self, partial, start, end):
         for r in xrange(start, end):
             c = self[r]
             b = c.branch()
             bheads = partial.setdefault(b, [])
             bheads.append(c.node())
             for p in c.parents():
                 pn = p.node()
                 if pn in bheads:
                     bheads.remove(pn)

     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.parents()[0]
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n

         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise error.RepoError(_("unknown revision '%s'") % key)

     def local(self):
         return True

     def join(self, f):
         return os.path.join(self.path, f)

     def wjoin(self, f):
         return os.path.join(self.root, f)

     def rjoin(self, f):
         return os.path.join(self.root, util.pconvert(f))

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)

     def _link(self, f):
         return os.path.islink(self.wjoin(f))

     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = match_.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l

         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener(filename, 'w').write(data)
             if 'x' in flags:
                 util.set_flags(self.wjoin(filename), False, True)

     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)

     def transaction(self):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()

         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(_("journal already exists - run hg recover"))

         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())

         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr

     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()

     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % encoding.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             release(lock, wlock)

     def invalidate(self):
         for a in "changelog manifest".split():
             if a in self.__dict__:
                 delattr(self, a)
         self.tagscache = None
         self._tagstypecache = None
         self.nodetagscache = None
         self.branchcache = None
         self._ubranchcache = None
         self._branchcachetip = None

     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l

     def lock(self, wait=True):
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l

     def wlock(self, wait=True):
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l

     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """

         fname = fctx.path()
         text = fctx.data()
         flog = self.file(fname)
         fparent1 = manifest1.get(fname, nullid)
         fparent2 = fparent2o = manifest2.get(fname, nullid)

         meta = {}
         copy = fctx.renamed()
         if copy and copy[0] != fname:
             # Mark the new revision of this file as a copy of another
             # file.  This copy data will effectively act as a parent
             # of this new revision.  If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent.  For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4   as the merge base
             #

             cfname = copy[0]
             crev = manifest1.get(cfname)
             newfparent = fparent2

             if manifest2: # branch merge
                 if fparent2 == nullid or crev is None: # copied on remote side
                     if cfname in manifest2:
                         crev = manifest2[cfname]
                         newfparent = fparent1

             # find source in nearest ancestor if we've lost track
             if not crev:
                 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                               (fname, cfname))
                 for ancestor in self['.'].ancestors():
                     if cfname in ancestor:
                         crev = ancestor[cfname].filenode()
                         break

             self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
             meta["copy"] = cfname
             meta["copyrev"] = hex(crev)
             fparent1, fparent2 = nullid, newfparent
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
             fparentancestor = flog.ancestor(fparent1, fparent2)
             if fparentancestor == fparent1:
                 fparent1, fparent2 = fparent2, nullid
             elif fparentancestor == fparent2:
                 fparent2 = nullid

         # is the file changed?
         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
             changelist.append(fname)
             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

         # are just the flags changed during merge?
         if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
             changelist.append(fname)

         return fparent1

     def commit(self, text="", user=None, date=None, match=None, force=False,
                editor=False, extra={}):
         """Add a new revision to current repository.

         Revision information is gathered from the working directory,
         match can be used to filter the committed files. If editor is
         supplied, it is called to get a commit message.
         """

         def fail(f, msg):
             raise util.Abort('%s: %s' % (f, msg))

         if not match:
             match = match_.always(self.root, '')

         if not force:
             vdirs = []
             match.dir = vdirs.append
             match.bad = fail

         wlock = self.wlock()
         try:
             p1, p2 = self.dirstate.parents()
             wctx = self[None]

             if (not force and p2 != nullid and match and
                 (match.files() or match.anypats())):
                 raise util.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

             changes = self.status(match=match, clean=force)
             if force:
                 changes[0].extend(changes[6]) # mq may commit unchanged files

             # check subrepos
             subs = []
             for s in wctx.substate:
                 if match(s) and wctx.sub(s).dirty():
                     subs.append(s)
             if subs and '.hgsubstate' not in changes[0]:
                 changes[0].insert(0, '.hgsubstate')

             # make sure all explicit patterns are matched
             if not force and match.files():
                 matched = set(changes[0] + changes[1] + changes[2])

                 for f in match.files():
                     if f == '.' or f in matched or f in wctx.substate:
                         continue
                     if f in changes[3]: # missing
                         fail(f, _('file not found!'))
                     if f in vdirs: # visited directory
                         d = f + '/'
                         for mf in matched:
                             if mf.startswith(d):
                                 break
                         else:
                             fail(f, _("no match under directory!"))
                     elif f not in self.dirstate:
                         fail(f, _("file not tracked!"))

             if (not force and not extra.get("close") and p2 == nullid
                 and not (changes[0] or changes[1] or changes[2])
                 and self[None].branch() == self['.'].branch()):
                 self.ui.status(_("nothing changed\n"))
                 return None

             ms = merge_.mergestate(self)
             for f in changes[0]:
                 if f in ms and ms[f] == 'u':
                     raise util.Abort(_("unresolved merge conflicts "
                                        "(see hg resolve)"))

             cctx = context.workingctx(self, (p1, p2), text, user, date,
                                       extra, changes)
             if editor:
                 cctx._text = editor(self, cctx)

             # commit subs
             if subs:
                 state = wctx.substate.copy()
                 for s in subs:
                     self.ui.status(_('committing subrepository %s\n') % s)
                     sr = wctx.sub(s).commit(cctx._text, user, date)
                     state[s] = (state[s][0], sr)
                 subrepo.writestate(self, state)

             ret = self.commitctx(cctx, True)

             # update dirstate and mergestate
             for f in changes[0] + changes[1]:
                 self.dirstate.normal(f)
             for f in changes[2]:
                 self.dirstate.forget(f)
             self.dirstate.setparents(ret)
             ms.reset()

             return ret

         finally:
             wlock.release()

889 def commitctx(self, ctx, error=False):
889 def commitctx(self, ctx, error=False):
890 """Add a new revision to current repository.
890 """Add a new revision to current repository.
891
891
892 Revision information is passed via the context argument.
892 Revision information is passed via the context argument.
893 """
893 """
894
894
895 tr = lock = None
895 tr = lock = None
896 removed = ctx.removed()
896 removed = ctx.removed()
897 p1, p2 = ctx.p1(), ctx.p2()
897 p1, p2 = ctx.p1(), ctx.p2()
898 m1 = p1.manifest().copy()
898 m1 = p1.manifest().copy()
899 m2 = p2.manifest()
899 m2 = p2.manifest()
900 user = ctx.user()
900 user = ctx.user()
901
901
902 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
902 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
903 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
903 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
904
904
905 lock = self.lock()
905 lock = self.lock()
906 try:
906 try:
907 tr = self.transaction()
907 tr = self.transaction()
908 trp = weakref.proxy(tr)
908 trp = weakref.proxy(tr)
909
909
910 # check in files
910 # check in files
911 new = {}
911 new = {}
912 changed = []
912 changed = []
913 linkrev = len(self)
913 linkrev = len(self)
914 for f in sorted(ctx.modified() + ctx.added()):
914 for f in sorted(ctx.modified() + ctx.added()):
915 self.ui.note(f + "\n")
915 self.ui.note(f + "\n")
916 try:
916 try:
917 fctx = ctx[f]
917 fctx = ctx[f]
918 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
918 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
919 changed)
919 changed)
920 m1.set(f, fctx.flags())
920 m1.set(f, fctx.flags())
921 except (OSError, IOError):
921 except (OSError, IOError):
922 if error:
922 if error:
923 self.ui.warn(_("trouble committing %s!\n") % f)
923 self.ui.warn(_("trouble committing %s!\n") % f)
924 raise
924 raise
925 else:
925 else:
926 removed.append(f)
926 removed.append(f)
927
927
928 # update manifest
928 # update manifest
929 m1.update(new)
929 m1.update(new)
930 removed = [f for f in sorted(removed) if f in m1 or f in m2]
930 removed = [f for f in sorted(removed) if f in m1 or f in m2]
931 drop = [f for f in removed if f in m1]
931 drop = [f for f in removed if f in m1]
932 for f in drop:
932 for f in drop:
933 del m1[f]
933 del m1[f]
934 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
934 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
935 p2.manifestnode(), (new, drop))
935 p2.manifestnode(), (new, drop))
936
936
937 # update changelog
937 # update changelog
938 self.changelog.delayupdate()
938 self.changelog.delayupdate()
939 n = self.changelog.add(mn, changed + removed, ctx.description(),
939 n = self.changelog.add(mn, changed + removed, ctx.description(),
940 trp, p1.node(), p2.node(),
940 trp, p1.node(), p2.node(),
941 user, ctx.date(), ctx.extra().copy())
941 user, ctx.date(), ctx.extra().copy())
942 p = lambda: self.changelog.writepending() and self.root or ""
942 p = lambda: self.changelog.writepending() and self.root or ""
943 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
943 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
944 parent2=xp2, pending=p)
944 parent2=xp2, pending=p)
945 self.changelog.finalize(trp)
945 self.changelog.finalize(trp)
946 tr.close()
946 tr.close()
947
947
948 if self.branchcache:
948 if self.branchcache:
949 self.branchtags()
949 self.branchtags()
950
950
951 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
951 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
952 return n
952 return n
953 finally:
953 finally:
954 del tr
954 del tr
955 lock.release()
955 lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes, or between a node and
        the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s
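            # 'cmp' lists files whose dirstate entry was inconclusive
            # (the stat data could not prove them clean), so their
            # contents still have to be compared below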

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
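                # dirstate states: 'a'dded, 'm'erged, 'n'ormal, 'r'emoved;
                # anything else ('?') is untracked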
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
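                    # old-style and/or idiom: take the first parent's
                    # manifest if it contains f, otherwise the second's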
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
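        # changelog.read() returns (manifest, user, date, files, desc,
        # extra); heads of closed branches carry a 'close' key in the
        # extra dict at index 5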
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
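        '''return, for each (top, bottom) pair, a list of sampled nodes

        For every pair, walk the first-parent chain from top towards
        bottom and collect the nodes found at exponentially growing
        distances (1, 2, 4, 8, ...). The discovery code in
        findcommonincoming uses these samples to binary-search for the
        point where local and remote history diverge.
        '''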
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

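            # if the remote end supports partial transfers and the caller
            # did not restrict the pull, request exactly the remote heads
            # found during discovery instead of a full changegroup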
            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
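                # candidate new remote heads: the local branch heads that
                # descend from at least one of the outgoing nodes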
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n")
                                 % self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

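        # prepush returns (changegroup, remote heads) when there is
        # something to send, or (None, status) where status becomes the
        # push's return value when nothing is sent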
        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
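            # a one-element list stands in for a rewritable closure cell;
            # Python 2 has no 'nonlocal' to assign next_rev directly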
1773 # This gathers information from each manifestnode included in the
1773 # This gathers information from each manifestnode included in the
1774 # changegroup about which filenodes the manifest node references
1774 # changegroup about which filenodes the manifest node references
1775 # so we can include those in the changegroup too.
1775 # so we can include those in the changegroup too.
1776 #
1776 #
1777 # It also remembers which changenode each filenode belongs to. It
1777 # It also remembers which changenode each filenode belongs to. It
1778 # does this by assuming the a filenode belongs to the changenode
1778 # does this by assuming the a filenode belongs to the changenode
1779 # the first manifest that references it belongs to.
1779 # the first manifest that references it belongs to.
1780 def collect_msng_filenodes(mnfstnode):
1780 def collect_msng_filenodes(mnfstnode):
1781 r = mnfst.rev(mnfstnode)
1781 r = mnfst.rev(mnfstnode)
1782 if r == next_rev[0]:
1782 if r == next_rev[0]:
1783 # If the last rev we looked at was the one just previous,
1783 # If the last rev we looked at was the one just previous,
1784 # we only need to see a diff.
1784 # we only need to see a diff.
1785 deltamf = mnfst.readdelta(mnfstnode)
1785 deltamf = mnfst.readdelta(mnfstnode)
1786 # For each line in the delta
1786 # For each line in the delta
1787 for f, fnode in deltamf.iteritems():
1787 for f, fnode in deltamf.iteritems():
1788 f = changedfiles.get(f, None)
1788 f = changedfiles.get(f, None)
1789 # And if the file is in the list of files we care
1789 # And if the file is in the list of files we care
1790 # about.
1790 # about.
1791 if f is not None:
1791 if f is not None:
1792 # Get the changenode this manifest belongs to
1792 # Get the changenode this manifest belongs to
1793 clnode = msng_mnfst_set[mnfstnode]
1793 clnode = msng_mnfst_set[mnfstnode]
1794 # Create the set of filenodes for the file if
1794 # Create the set of filenodes for the file if
1795 # there isn't one already.
1795 # there isn't one already.
1796 ndset = msng_filenode_set.setdefault(f, {})
1796 ndset = msng_filenode_set.setdefault(f, {})
1797 # And set the filenode's changelog node to the
1797 # And set the filenode's changelog node to the
1798 # manifest's if it hasn't been set already.
1798 # manifest's if it hasn't been set already.
1799 ndset.setdefault(fnode, clnode)
1799 ndset.setdefault(fnode, clnode)
1800 else:
1800 else:
1801 # Otherwise we need a full manifest.
1801 # Otherwise we need a full manifest.
1802 m = mnfst.read(mnfstnode)
1802 m = mnfst.read(mnfstnode)
1803 # For every file in we care about.
1803 # For every file in we care about.
1804 for f in changedfiles:
1804 for f in changedfiles:
1805 fnode = m.get(f, None)
1805 fnode = m.get(f, None)
1806 # If it's in the manifest
1806 # If it's in the manifest
1807 if fnode is not None:
1807 if fnode is not None:
1808 # See comments above.
1808 # See comments above.
1809 clnode = msng_mnfst_set[mnfstnode]
1809 clnode = msng_mnfst_set[mnfstnode]
1810 ndset = msng_filenode_set.setdefault(f, {})
1810 ndset = msng_filenode_set.setdefault(f, {})
1811 ndset.setdefault(fnode, clnode)
1811 ndset.setdefault(fnode, clnode)
1812 # Remember the revision we hope to see next.
1812 # Remember the revision we hope to see next.
1813 next_rev[0] = r + 1
1813 next_rev[0] = r + 1
1814 return collect_msng_filenodes
1814 return collect_msng_filenodes
1815
1815
1816 # We have a list of filenodes we think we need for a file, lets remove
1816 # We have a list of filenodes we think we need for a file, lets remove
1817 # all those we know the recipient must have.
1817 # all those we know the recipient must have.
1818 def prune_filenodes(f, filerevlog):
1818 def prune_filenodes(f, filerevlog):
1819 msngset = msng_filenode_set[f]
1819 msngset = msng_filenode_set[f]
1820 hasset = set()
1820 hasset = set()
1821 # If a 'missing' filenode thinks it belongs to a changenode we
1821 # If a 'missing' filenode thinks it belongs to a changenode we
1822 # assume the recipient must have, then the recipient must have
1822 # assume the recipient must have, then the recipient must have
1823 # that filenode.
1823 # that filenode.
1824 for n in msngset:
1824 for n in msngset:
1825 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1825 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1826 if clnode in has_cl_set:
1826 if clnode in has_cl_set:
1827 hasset.add(n)
1827 hasset.add(n)
1828 prune_parents(filerevlog, hasset, msngset)
1828 prune_parents(filerevlog, hasset, msngset)
1829
1829
1830 # A function generator function that sets up the a context for the
1830 # A function generator function that sets up the a context for the
1831 # inner function.
1831 # inner function.
1832 def lookup_filenode_link_func(fname):
1832 def lookup_filenode_link_func(fname):
1833 msngset = msng_filenode_set[fname]
1833 msngset = msng_filenode_set[fname]
1834 # Lookup the changenode the filenode belongs to.
1834 # Lookup the changenode the filenode belongs to.
1835 def lookup_filenode_link(fnode):
1835 def lookup_filenode_link(fnode):
1836 return msngset[fnode]
1836 return msngset[fnode]
1837 return lookup_filenode_link
1837 return lookup_filenode_link
1838
1838
1839 # Add the nodes that were explicitly requested.
1839 # Add the nodes that were explicitly requested.
1840 def add_extra_nodes(name, nodes):
1840 def add_extra_nodes(name, nodes):
1841 if not extranodes or name not in extranodes:
1841 if not extranodes or name not in extranodes:
1842 return
1842 return
1843
1843
1844 for node, linknode in extranodes[name]:
1844 for node, linknode in extranodes[name]:
1845 if node not in nodes:
1845 if node not in nodes:
1846 nodes[node] = linknode
1846 nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
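
            # Added commentary, summarizing the stream produced above: each
            # chunk on the wire is a 4-byte big-endian length (counting the
            # length field itself) followed by payload, and closechunk()
            # emits a zero length meaning "group finished". The layout is:
            #
            #   changelog chunks,  empty chunk
            #   manifest chunks,   empty chunk
            #   for each changed file:
            #       filename chunk, filenode chunks, empty chunk
            #   empty chunk   (the closechunk above: no more file groups)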

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320): taking a
        # snapshot of heads() here pins the outgoing set, so changesets
        # committed while the group streams cannot shift what we send
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)
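
        # Added commentary: every revlog revision records the changelog
        # revision that introduced it (its linkrev). Keeping only revisions
        # whose linkrev lands in the outgoing revset therefore selects
        # exactly the manifest and file nodes the recipient is missing,
        # without reading any changeset text.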

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files touched by this changeset
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node to the changelog node that introduced it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
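        # Added commentary: an illustrative (hypothetical) caller, showing
        # how the head-count encoding above is meant to be unpacked:
        #
        #   modheads = repo.addchangegroup(fp, 'pull', url)
        #   if modheads == 0:
        #       pass                               # nothing was added
        #   elif modheads > 1:
        #       ui.status('+%d heads\n' % (modheads - 1))
        #
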
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # a changelog entry is its own link target, so link to the
            # revision about to be added
            return len(cl)

        def revmap(x):
            # map a changelog node to its revision number for use as linkrev
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here: if two
            # changesets produce the same manifest (e.g. the result of
            # merging 1 and 2 is identical in 3 and 4), no new manifest
            # node is created, so the group can legitimately be empty
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    # an empty chunk signals the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # expose the pending changelog to pretxnchangegroup hooks:
                # if writepending() created pending data, hand the hook our
                # root so it can find it
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        # report every head as added, mirroring addchangegroup()'s
        # never-zero return convention
        return len(self.heads()) + 1
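
    # Added commentary, summarizing the wire format consumed by stream_in
    # above:
    #
    #   <status>\n                    0 = ok, 1 = forbidden, 2 = lock failed
    #   <total files> <total bytes>\n
    #   then, for each file:
    #   <store path>\0<size>\n        followed by exactly <size> raw bytes
    #
    # The payload is store file content copied verbatim, so once everything
    # is written the in-memory state is stale and self.invalidate() forces
    # a re-read.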

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
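
# Added commentary: a module-level function is used here (rather than a
# method) so the returned callback keeps no reference to the repository
# object. A hypothetical sketch of the intended use, renaming transaction
# journal files to their undo names once the transaction completes:
#
#   renames = [(sjoin('journal'), sjoin('undo'))]
#   tr = transaction.transaction(ui.warn, sopener, sjoin('journal'),
#                                aftertrans(renames))
#
# sjoin/sopener above stand in for the repository's store helpers.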

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True