|
@@
-1,2135
+1,2137
b''
|
|
1
|
# localrepo.py - read/write repository class for mercurial
|
|
1
|
# localrepo.py - read/write repository class for mercurial
|
|
2
|
#
|
|
2
|
#
|
|
3
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
3
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
4
|
#
|
|
4
|
#
|
|
5
|
# This software may be used and distributed according to the terms of the
|
|
5
|
# This software may be used and distributed according to the terms of the
|
|
6
|
# GNU General Public License version 2 or any later version.
|
|
6
|
# GNU General Public License version 2 or any later version.
|
|
7
|
|
|
7
|
|
|
8
|
from node import bin, hex, nullid, nullrev, short
|
|
8
|
from node import bin, hex, nullid, nullrev, short
|
|
9
|
from i18n import _
|
|
9
|
from i18n import _
|
|
10
|
import repo, changegroup, subrepo, discovery, pushkey
|
|
10
|
import repo, changegroup, subrepo, discovery, pushkey
|
|
11
|
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
|
|
11
|
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
|
|
12
|
import lock, transaction, store, encoding
|
|
12
|
import lock, transaction, store, encoding
|
|
13
|
import scmutil, util, extensions, hook, error, revset
|
|
13
|
import scmutil, util, extensions, hook, error, revset
|
|
14
|
import match as matchmod
|
|
14
|
import match as matchmod
|
|
15
|
import merge as mergemod
|
|
15
|
import merge as mergemod
|
|
16
|
import tags as tagsmod
|
|
16
|
import tags as tagsmod
|
|
17
|
from lock import release
|
|
17
|
from lock import release
|
|
18
|
import weakref, errno, os, time, inspect
|
|
18
|
import weakref, errno, os, time, inspect
|
|
19
|
propertycache = util.propertycache
|
|
19
|
propertycache = util.propertycache
|
|
20
|
filecache = scmutil.filecache
|
|
20
|
filecache = scmutil.filecache
|
|
21
|
|
|
21
|
|
|
22
|
class localrepository(repo.repository):
|
|
22
|
class localrepository(repo.repository):
|
|
23
|
capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
|
|
23
|
capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
|
|
24
|
'known', 'getbundle'))
|
|
24
|
'known', 'getbundle'))
|
|
25
|
supportedformats = set(('revlogv1', 'generaldelta'))
|
|
25
|
supportedformats = set(('revlogv1', 'generaldelta'))
|
|
26
|
supported = supportedformats | set(('store', 'fncache', 'shared',
|
|
26
|
supported = supportedformats | set(('store', 'fncache', 'shared',
|
|
27
|
'dotencode'))
|
|
27
|
'dotencode'))
|
|
28
|
|
|
28
|
|
|
29
|
def __init__(self, baseui, path=None, create=False):
|
|
29
|
def __init__(self, baseui, path=None, create=False):
|
|
30
|
repo.repository.__init__(self)
|
|
30
|
repo.repository.__init__(self)
|
|
31
|
self.root = os.path.realpath(util.expandpath(path))
|
|
31
|
self.root = os.path.realpath(util.expandpath(path))
|
|
32
|
self.path = os.path.join(self.root, ".hg")
|
|
32
|
self.path = os.path.join(self.root, ".hg")
|
|
33
|
self.origroot = path
|
|
33
|
self.origroot = path
|
|
34
|
self.auditor = scmutil.pathauditor(self.root, self._checknested)
|
|
34
|
self.auditor = scmutil.pathauditor(self.root, self._checknested)
|
|
35
|
self.opener = scmutil.opener(self.path)
|
|
35
|
self.opener = scmutil.opener(self.path)
|
|
36
|
self.wopener = scmutil.opener(self.root)
|
|
36
|
self.wopener = scmutil.opener(self.root)
|
|
37
|
self.baseui = baseui
|
|
37
|
self.baseui = baseui
|
|
38
|
self.ui = baseui.copy()
|
|
38
|
self.ui = baseui.copy()
|
|
39
|
self._dirtyphases = False
|
|
39
|
self._dirtyphases = False
|
|
40
|
|
|
40
|
|
|
41
|
try:
|
|
41
|
try:
|
|
42
|
self.ui.readconfig(self.join("hgrc"), self.root)
|
|
42
|
self.ui.readconfig(self.join("hgrc"), self.root)
|
|
43
|
extensions.loadall(self.ui)
|
|
43
|
extensions.loadall(self.ui)
|
|
44
|
except IOError:
|
|
44
|
except IOError:
|
|
45
|
pass
|
|
45
|
pass
|
|
46
|
|
|
46
|
|
|
47
|
if not os.path.isdir(self.path):
|
|
47
|
if not os.path.isdir(self.path):
|
|
48
|
if create:
|
|
48
|
if create:
|
|
49
|
if not os.path.exists(path):
|
|
49
|
if not os.path.exists(path):
|
|
50
|
util.makedirs(path)
|
|
50
|
util.makedirs(path)
|
|
51
|
util.makedir(self.path, notindexed=True)
|
|
51
|
util.makedir(self.path, notindexed=True)
|
|
52
|
requirements = ["revlogv1"]
|
|
52
|
requirements = ["revlogv1"]
|
|
53
|
if self.ui.configbool('format', 'usestore', True):
|
|
53
|
if self.ui.configbool('format', 'usestore', True):
|
|
54
|
os.mkdir(os.path.join(self.path, "store"))
|
|
54
|
os.mkdir(os.path.join(self.path, "store"))
|
|
55
|
requirements.append("store")
|
|
55
|
requirements.append("store")
|
|
56
|
if self.ui.configbool('format', 'usefncache', True):
|
|
56
|
if self.ui.configbool('format', 'usefncache', True):
|
|
57
|
requirements.append("fncache")
|
|
57
|
requirements.append("fncache")
|
|
58
|
if self.ui.configbool('format', 'dotencode', True):
|
|
58
|
if self.ui.configbool('format', 'dotencode', True):
|
|
59
|
requirements.append('dotencode')
|
|
59
|
requirements.append('dotencode')
|
|
60
|
# create an invalid changelog
|
|
60
|
# create an invalid changelog
|
|
61
|
self.opener.append(
|
|
61
|
self.opener.append(
|
|
62
|
"00changelog.i",
|
|
62
|
"00changelog.i",
|
|
63
|
'\0\0\0\2' # represents revlogv2
|
|
63
|
'\0\0\0\2' # represents revlogv2
|
|
64
|
' dummy changelog to prevent using the old repo layout'
|
|
64
|
' dummy changelog to prevent using the old repo layout'
|
|
65
|
)
|
|
65
|
)
|
|
66
|
if self.ui.configbool('format', 'generaldelta', False):
|
|
66
|
if self.ui.configbool('format', 'generaldelta', False):
|
|
67
|
requirements.append("generaldelta")
|
|
67
|
requirements.append("generaldelta")
|
|
68
|
requirements = set(requirements)
|
|
68
|
requirements = set(requirements)
|
|
69
|
else:
|
|
69
|
else:
|
|
70
|
raise error.RepoError(_("repository %s not found") % path)
|
|
70
|
raise error.RepoError(_("repository %s not found") % path)
|
|
71
|
elif create:
|
|
71
|
elif create:
|
|
72
|
raise error.RepoError(_("repository %s already exists") % path)
|
|
72
|
raise error.RepoError(_("repository %s already exists") % path)
|
|
73
|
else:
|
|
73
|
else:
|
|
74
|
try:
|
|
74
|
try:
|
|
75
|
requirements = scmutil.readrequires(self.opener, self.supported)
|
|
75
|
requirements = scmutil.readrequires(self.opener, self.supported)
|
|
76
|
except IOError, inst:
|
|
76
|
except IOError, inst:
|
|
77
|
if inst.errno != errno.ENOENT:
|
|
77
|
if inst.errno != errno.ENOENT:
|
|
78
|
raise
|
|
78
|
raise
|
|
79
|
requirements = set()
|
|
79
|
requirements = set()
|
|
80
|
|
|
80
|
|
|
81
|
self.sharedpath = self.path
|
|
81
|
self.sharedpath = self.path
|
|
82
|
try:
|
|
82
|
try:
|
|
83
|
s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
|
|
83
|
s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
|
|
84
|
if not os.path.exists(s):
|
|
84
|
if not os.path.exists(s):
|
|
85
|
raise error.RepoError(
|
|
85
|
raise error.RepoError(
|
|
86
|
_('.hg/sharedpath points to nonexistent directory %s') % s)
|
|
86
|
_('.hg/sharedpath points to nonexistent directory %s') % s)
|
|
87
|
self.sharedpath = s
|
|
87
|
self.sharedpath = s
|
|
88
|
except IOError, inst:
|
|
88
|
except IOError, inst:
|
|
89
|
if inst.errno != errno.ENOENT:
|
|
89
|
if inst.errno != errno.ENOENT:
|
|
90
|
raise
|
|
90
|
raise
|
|
91
|
|
|
91
|
|
|
92
|
self.store = store.store(requirements, self.sharedpath, scmutil.opener)
|
|
92
|
self.store = store.store(requirements, self.sharedpath, scmutil.opener)
|
|
93
|
self.spath = self.store.path
|
|
93
|
self.spath = self.store.path
|
|
94
|
self.sopener = self.store.opener
|
|
94
|
self.sopener = self.store.opener
|
|
95
|
self.sjoin = self.store.join
|
|
95
|
self.sjoin = self.store.join
|
|
96
|
self.opener.createmode = self.store.createmode
|
|
96
|
self.opener.createmode = self.store.createmode
|
|
97
|
self._applyrequirements(requirements)
|
|
97
|
self._applyrequirements(requirements)
|
|
98
|
if create:
|
|
98
|
if create:
|
|
99
|
self._writerequirements()
|
|
99
|
self._writerequirements()
|
|
100
|
|
|
100
|
|
|
101
|
|
|
101
|
|
|
102
|
self._branchcache = None
|
|
102
|
self._branchcache = None
|
|
103
|
self._branchcachetip = None
|
|
103
|
self._branchcachetip = None
|
|
104
|
self.filterpats = {}
|
|
104
|
self.filterpats = {}
|
|
105
|
self._datafilters = {}
|
|
105
|
self._datafilters = {}
|
|
106
|
self._transref = self._lockref = self._wlockref = None
|
|
106
|
self._transref = self._lockref = self._wlockref = None
|
|
107
|
|
|
107
|
|
|
108
|
# A cache for various files under .hg/ that tracks file changes,
|
|
108
|
# A cache for various files under .hg/ that tracks file changes,
|
|
109
|
# (used by the filecache decorator)
|
|
109
|
# (used by the filecache decorator)
|
|
110
|
#
|
|
110
|
#
|
|
111
|
# Maps a property name to its util.filecacheentry
|
|
111
|
# Maps a property name to its util.filecacheentry
|
|
112
|
self._filecache = {}
|
|
112
|
self._filecache = {}
|
|
113
|
|
|
113
|
|
|
114
|
def _applyrequirements(self, requirements):
|
|
114
|
def _applyrequirements(self, requirements):
|
|
115
|
self.requirements = requirements
|
|
115
|
self.requirements = requirements
|
|
116
|
openerreqs = set(('revlogv1', 'generaldelta'))
|
|
116
|
openerreqs = set(('revlogv1', 'generaldelta'))
|
|
117
|
self.sopener.options = dict((r, 1) for r in requirements
|
|
117
|
self.sopener.options = dict((r, 1) for r in requirements
|
|
118
|
if r in openerreqs)
|
|
118
|
if r in openerreqs)
|
|
119
|
|
|
119
|
|
|
120
|
def _writerequirements(self):
|
|
120
|
def _writerequirements(self):
|
|
121
|
reqfile = self.opener("requires", "w")
|
|
121
|
reqfile = self.opener("requires", "w")
|
|
122
|
for r in self.requirements:
|
|
122
|
for r in self.requirements:
|
|
123
|
reqfile.write("%s\n" % r)
|
|
123
|
reqfile.write("%s\n" % r)
|
|
124
|
reqfile.close()
|
|
124
|
reqfile.close()
|
|
125
|
|
|
125
|
|
|
126
|
def _checknested(self, path):
|
|
126
|
def _checknested(self, path):
|
|
127
|
"""Determine if path is a legal nested repository."""
|
|
127
|
"""Determine if path is a legal nested repository."""
|
|
128
|
if not path.startswith(self.root):
|
|
128
|
if not path.startswith(self.root):
|
|
129
|
return False
|
|
129
|
return False
|
|
130
|
subpath = path[len(self.root) + 1:]
|
|
130
|
subpath = path[len(self.root) + 1:]
|
|
131
|
|
|
131
|
|
|
132
|
# XXX: Checking against the current working copy is wrong in
|
|
132
|
# XXX: Checking against the current working copy is wrong in
|
|
133
|
# the sense that it can reject things like
|
|
133
|
# the sense that it can reject things like
|
|
134
|
#
|
|
134
|
#
|
|
135
|
# $ hg cat -r 10 sub/x.txt
|
|
135
|
# $ hg cat -r 10 sub/x.txt
|
|
136
|
#
|
|
136
|
#
|
|
137
|
# if sub/ is no longer a subrepository in the working copy
|
|
137
|
# if sub/ is no longer a subrepository in the working copy
|
|
138
|
# parent revision.
|
|
138
|
# parent revision.
|
|
139
|
#
|
|
139
|
#
|
|
140
|
# However, it can of course also allow things that would have
|
|
140
|
# However, it can of course also allow things that would have
|
|
141
|
# been rejected before, such as the above cat command if sub/
|
|
141
|
# been rejected before, such as the above cat command if sub/
|
|
142
|
# is a subrepository now, but was a normal directory before.
|
|
142
|
# is a subrepository now, but was a normal directory before.
|
|
143
|
# The old path auditor would have rejected by mistake since it
|
|
143
|
# The old path auditor would have rejected by mistake since it
|
|
144
|
# panics when it sees sub/.hg/.
|
|
144
|
# panics when it sees sub/.hg/.
|
|
145
|
#
|
|
145
|
#
|
|
146
|
# All in all, checking against the working copy seems sensible
|
|
146
|
# All in all, checking against the working copy seems sensible
|
|
147
|
# since we want to prevent access to nested repositories on
|
|
147
|
# since we want to prevent access to nested repositories on
|
|
148
|
# the filesystem *now*.
|
|
148
|
# the filesystem *now*.
|
|
149
|
ctx = self[None]
|
|
149
|
ctx = self[None]
|
|
150
|
parts = util.splitpath(subpath)
|
|
150
|
parts = util.splitpath(subpath)
|
|
151
|
while parts:
|
|
151
|
while parts:
|
|
152
|
prefix = os.sep.join(parts)
|
|
152
|
prefix = os.sep.join(parts)
|
|
153
|
if prefix in ctx.substate:
|
|
153
|
if prefix in ctx.substate:
|
|
154
|
if prefix == subpath:
|
|
154
|
if prefix == subpath:
|
|
155
|
return True
|
|
155
|
return True
|
|
156
|
else:
|
|
156
|
else:
|
|
157
|
sub = ctx.sub(prefix)
|
|
157
|
sub = ctx.sub(prefix)
|
|
158
|
return sub.checknested(subpath[len(prefix) + 1:])
|
|
158
|
return sub.checknested(subpath[len(prefix) + 1:])
|
|
159
|
else:
|
|
159
|
else:
|
|
160
|
parts.pop()
|
|
160
|
parts.pop()
|
|
161
|
return False
|
|
161
|
return False
|
|
162
|
|
|
162
|
|
|
163
|
@filecache('bookmarks')
|
|
163
|
@filecache('bookmarks')
|
|
164
|
def _bookmarks(self):
|
|
164
|
def _bookmarks(self):
|
|
165
|
return bookmarks.read(self)
|
|
165
|
return bookmarks.read(self)
|
|
166
|
|
|
166
|
|
|
167
|
@filecache('bookmarks.current')
|
|
167
|
@filecache('bookmarks.current')
|
|
168
|
def _bookmarkcurrent(self):
|
|
168
|
def _bookmarkcurrent(self):
|
|
169
|
return bookmarks.readcurrent(self)
|
|
169
|
return bookmarks.readcurrent(self)
|
|
170
|
|
|
170
|
|
|
171
|
def _writebookmarks(self, marks):
|
|
171
|
def _writebookmarks(self, marks):
|
|
172
|
bookmarks.write(self)
|
|
172
|
bookmarks.write(self)
|
|
173
|
|
|
173
|
|
|
174
|
@filecache('phaseroots')
|
|
174
|
@filecache('phaseroots')
|
|
175
|
def _phaseroots(self):
|
|
175
|
def _phaseroots(self):
|
|
176
|
self._dirtyphases = False
|
|
176
|
self._dirtyphases = False
|
|
177
|
phaseroots = phases.readroots(self)
|
|
177
|
phaseroots = phases.readroots(self)
|
|
178
|
phases.filterunknown(self, phaseroots)
|
|
178
|
phases.filterunknown(self, phaseroots)
|
|
179
|
return phaseroots
|
|
179
|
return phaseroots
|
|
180
|
|
|
180
|
|
|
181
|
@propertycache
|
|
181
|
@propertycache
|
|
182
|
def _phaserev(self):
|
|
182
|
def _phaserev(self):
|
|
183
|
cache = [0] * len(self)
|
|
183
|
cache = [0] * len(self)
|
|
184
|
for phase in phases.trackedphases:
|
|
184
|
for phase in phases.trackedphases:
|
|
185
|
roots = map(self.changelog.rev, self._phaseroots[phase])
|
|
185
|
roots = map(self.changelog.rev, self._phaseroots[phase])
|
|
186
|
if roots:
|
|
186
|
if roots:
|
|
187
|
for rev in roots:
|
|
187
|
for rev in roots:
|
|
188
|
cache[rev] = phase
|
|
188
|
cache[rev] = phase
|
|
189
|
for rev in self.changelog.descendants(*roots):
|
|
189
|
for rev in self.changelog.descendants(*roots):
|
|
190
|
cache[rev] = phase
|
|
190
|
cache[rev] = phase
|
|
191
|
return cache
|
|
191
|
return cache
|
|
192
|
|
|
192
|
|
|
193
|
@filecache('00changelog.i', True)
|
|
193
|
@filecache('00changelog.i', True)
|
|
194
|
def changelog(self):
|
|
194
|
def changelog(self):
|
|
195
|
c = changelog.changelog(self.sopener)
|
|
195
|
c = changelog.changelog(self.sopener)
|
|
196
|
if 'HG_PENDING' in os.environ:
|
|
196
|
if 'HG_PENDING' in os.environ:
|
|
197
|
p = os.environ['HG_PENDING']
|
|
197
|
p = os.environ['HG_PENDING']
|
|
198
|
if p.startswith(self.root):
|
|
198
|
if p.startswith(self.root):
|
|
199
|
c.readpending('00changelog.i.a')
|
|
199
|
c.readpending('00changelog.i.a')
|
|
200
|
return c
|
|
200
|
return c
|
|
201
|
|
|
201
|
|
|
202
|
@filecache('00manifest.i', True)
|
|
202
|
@filecache('00manifest.i', True)
|
|
203
|
def manifest(self):
|
|
203
|
def manifest(self):
|
|
204
|
return manifest.manifest(self.sopener)
|
|
204
|
return manifest.manifest(self.sopener)
|
|
205
|
|
|
205
|
|
|
206
|
@filecache('dirstate')
|
|
206
|
@filecache('dirstate')
|
|
207
|
def dirstate(self):
|
|
207
|
def dirstate(self):
|
|
208
|
warned = [0]
|
|
208
|
warned = [0]
|
|
209
|
def validate(node):
|
|
209
|
def validate(node):
|
|
210
|
try:
|
|
210
|
try:
|
|
211
|
self.changelog.rev(node)
|
|
211
|
self.changelog.rev(node)
|
|
212
|
return node
|
|
212
|
return node
|
|
213
|
except error.LookupError:
|
|
213
|
except error.LookupError:
|
|
214
|
if not warned[0]:
|
|
214
|
if not warned[0]:
|
|
215
|
warned[0] = True
|
|
215
|
warned[0] = True
|
|
216
|
self.ui.warn(_("warning: ignoring unknown"
|
|
216
|
self.ui.warn(_("warning: ignoring unknown"
|
|
217
|
" working parent %s!\n") % short(node))
|
|
217
|
" working parent %s!\n") % short(node))
|
|
218
|
return nullid
|
|
218
|
return nullid
|
|
219
|
|
|
219
|
|
|
220
|
return dirstate.dirstate(self.opener, self.ui, self.root, validate)
|
|
220
|
return dirstate.dirstate(self.opener, self.ui, self.root, validate)
|
|
221
|
|
|
221
|
|
|
222
|
def __getitem__(self, changeid):
|
|
222
|
def __getitem__(self, changeid):
|
|
223
|
if changeid is None:
|
|
223
|
if changeid is None:
|
|
224
|
return context.workingctx(self)
|
|
224
|
return context.workingctx(self)
|
|
225
|
return context.changectx(self, changeid)
|
|
225
|
return context.changectx(self, changeid)
|
|
226
|
|
|
226
|
|
|
227
|
def __contains__(self, changeid):
|
|
227
|
def __contains__(self, changeid):
|
|
228
|
try:
|
|
228
|
try:
|
|
229
|
return bool(self.lookup(changeid))
|
|
229
|
return bool(self.lookup(changeid))
|
|
230
|
except error.RepoLookupError:
|
|
230
|
except error.RepoLookupError:
|
|
231
|
return False
|
|
231
|
return False
|
|
232
|
|
|
232
|
|
|
233
|
def __nonzero__(self):
|
|
233
|
def __nonzero__(self):
|
|
234
|
return True
|
|
234
|
return True
|
|
235
|
|
|
235
|
|
|
236
|
def __len__(self):
|
|
236
|
def __len__(self):
|
|
237
|
return len(self.changelog)
|
|
237
|
return len(self.changelog)
|
|
238
|
|
|
238
|
|
|
239
|
def __iter__(self):
|
|
239
|
def __iter__(self):
|
|
240
|
for i in xrange(len(self)):
|
|
240
|
for i in xrange(len(self)):
|
|
241
|
yield i
|
|
241
|
yield i
|
|
242
|
|
|
242
|
|
|
243
|
def revs(self, expr, *args):
|
|
243
|
def revs(self, expr, *args):
|
|
244
|
'''Return a list of revisions matching the given revset'''
|
|
244
|
'''Return a list of revisions matching the given revset'''
|
|
245
|
expr = revset.formatspec(expr, *args)
|
|
245
|
expr = revset.formatspec(expr, *args)
|
|
246
|
m = revset.match(None, expr)
|
|
246
|
m = revset.match(None, expr)
|
|
247
|
return [r for r in m(self, range(len(self)))]
|
|
247
|
return [r for r in m(self, range(len(self)))]
|
|
248
|
|
|
248
|
|
|
249
|
def set(self, expr, *args):
|
|
249
|
def set(self, expr, *args):
|
|
250
|
'''
|
|
250
|
'''
|
|
251
|
Yield a context for each matching revision, after doing arg
|
|
251
|
Yield a context for each matching revision, after doing arg
|
|
252
|
replacement via revset.formatspec
|
|
252
|
replacement via revset.formatspec
|
|
253
|
'''
|
|
253
|
'''
|
|
254
|
for r in self.revs(expr, *args):
|
|
254
|
for r in self.revs(expr, *args):
|
|
255
|
yield self[r]
|
|
255
|
yield self[r]
|
|
256
|
|
|
256
|
|
|
257
|
def url(self):
|
|
257
|
def url(self):
|
|
258
|
return 'file:' + self.root
|
|
258
|
return 'file:' + self.root
|
|
259
|
|
|
259
|
|
|
260
|
def hook(self, name, throw=False, **args):
|
|
260
|
def hook(self, name, throw=False, **args):
|
|
261
|
return hook.hook(self.ui, self, name, throw, **args)
|
|
261
|
return hook.hook(self.ui, self, name, throw, **args)
|
|
262
|
|
|
262
|
|
|
263
|
tag_disallowed = ':\r\n'
|
|
263
|
tag_disallowed = ':\r\n'
|
|
264
|
|
|
264
|
|
|
265
|
def _tag(self, names, node, message, local, user, date, extra={}):
|
|
265
|
def _tag(self, names, node, message, local, user, date, extra={}):
|
|
266
|
if isinstance(names, str):
|
|
266
|
if isinstance(names, str):
|
|
267
|
allchars = names
|
|
267
|
allchars = names
|
|
268
|
names = (names,)
|
|
268
|
names = (names,)
|
|
269
|
else:
|
|
269
|
else:
|
|
270
|
allchars = ''.join(names)
|
|
270
|
allchars = ''.join(names)
|
|
271
|
for c in self.tag_disallowed:
|
|
271
|
for c in self.tag_disallowed:
|
|
272
|
if c in allchars:
|
|
272
|
if c in allchars:
|
|
273
|
raise util.Abort(_('%r cannot be used in a tag name') % c)
|
|
273
|
raise util.Abort(_('%r cannot be used in a tag name') % c)
|
|
274
|
|
|
274
|
|
|
275
|
branches = self.branchmap()
|
|
275
|
branches = self.branchmap()
|
|
276
|
for name in names:
|
|
276
|
for name in names:
|
|
277
|
self.hook('pretag', throw=True, node=hex(node), tag=name,
|
|
277
|
self.hook('pretag', throw=True, node=hex(node), tag=name,
|
|
278
|
local=local)
|
|
278
|
local=local)
|
|
279
|
if name in branches:
|
|
279
|
if name in branches:
|
|
280
|
self.ui.warn(_("warning: tag %s conflicts with existing"
|
|
280
|
self.ui.warn(_("warning: tag %s conflicts with existing"
|
|
281
|
" branch name\n") % name)
|
|
281
|
" branch name\n") % name)
|
|
282
|
|
|
282
|
|
|
283
|
def writetags(fp, names, munge, prevtags):
|
|
283
|
def writetags(fp, names, munge, prevtags):
|
|
284
|
fp.seek(0, 2)
|
|
284
|
fp.seek(0, 2)
|
|
285
|
if prevtags and prevtags[-1] != '\n':
|
|
285
|
if prevtags and prevtags[-1] != '\n':
|
|
286
|
fp.write('\n')
|
|
286
|
fp.write('\n')
|
|
287
|
for name in names:
|
|
287
|
for name in names:
|
|
288
|
m = munge and munge(name) or name
|
|
288
|
m = munge and munge(name) or name
|
|
289
|
if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
|
|
289
|
if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
|
|
290
|
old = self.tags().get(name, nullid)
|
|
290
|
old = self.tags().get(name, nullid)
|
|
291
|
fp.write('%s %s\n' % (hex(old), m))
|
|
291
|
fp.write('%s %s\n' % (hex(old), m))
|
|
292
|
fp.write('%s %s\n' % (hex(node), m))
|
|
292
|
fp.write('%s %s\n' % (hex(node), m))
|
|
293
|
fp.close()
|
|
293
|
fp.close()
|
|
294
|
|
|
294
|
|
|
295
|
prevtags = ''
|
|
295
|
prevtags = ''
|
|
296
|
if local:
|
|
296
|
if local:
|
|
297
|
try:
|
|
297
|
try:
|
|
298
|
fp = self.opener('localtags', 'r+')
|
|
298
|
fp = self.opener('localtags', 'r+')
|
|
299
|
except IOError:
|
|
299
|
except IOError:
|
|
300
|
fp = self.opener('localtags', 'a')
|
|
300
|
fp = self.opener('localtags', 'a')
|
|
301
|
else:
|
|
301
|
else:
|
|
302
|
prevtags = fp.read()
|
|
302
|
prevtags = fp.read()
|
|
303
|
|
|
303
|
|
|
304
|
# local tags are stored in the current charset
|
|
304
|
# local tags are stored in the current charset
|
|
305
|
writetags(fp, names, None, prevtags)
|
|
305
|
writetags(fp, names, None, prevtags)
|
|
306
|
for name in names:
|
|
306
|
for name in names:
|
|
307
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
|
307
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
|
308
|
return
|
|
308
|
return
|
|
309
|
|
|
309
|
|
|
310
|
try:
|
|
310
|
try:
|
|
311
|
fp = self.wfile('.hgtags', 'rb+')
|
|
311
|
fp = self.wfile('.hgtags', 'rb+')
|
|
312
|
except IOError, e:
|
|
312
|
except IOError, e:
|
|
313
|
if e.errno != errno.ENOENT:
|
|
313
|
if e.errno != errno.ENOENT:
|
|
314
|
raise
|
|
314
|
raise
|
|
315
|
fp = self.wfile('.hgtags', 'ab')
|
|
315
|
fp = self.wfile('.hgtags', 'ab')
|
|
316
|
else:
|
|
316
|
else:
|
|
317
|
prevtags = fp.read()
|
|
317
|
prevtags = fp.read()
|
|
318
|
|
|
318
|
|
|
319
|
# committed tags are stored in UTF-8
|
|
319
|
# committed tags are stored in UTF-8
|
|
320
|
writetags(fp, names, encoding.fromlocal, prevtags)
|
|
320
|
writetags(fp, names, encoding.fromlocal, prevtags)
|
|
321
|
|
|
321
|
|
|
322
|
fp.close()
|
|
322
|
fp.close()
|
|
323
|
|
|
323
|
|
|
324
|
if '.hgtags' not in self.dirstate:
|
|
324
|
if '.hgtags' not in self.dirstate:
|
|
325
|
self[None].add(['.hgtags'])
|
|
325
|
self[None].add(['.hgtags'])
|
|
326
|
|
|
326
|
|
|
327
|
m = matchmod.exact(self.root, '', ['.hgtags'])
|
|
327
|
m = matchmod.exact(self.root, '', ['.hgtags'])
|
|
328
|
tagnode = self.commit(message, user, date, extra=extra, match=m)
|
|
328
|
tagnode = self.commit(message, user, date, extra=extra, match=m)
|
|
329
|
|
|
329
|
|
|
330
|
for name in names:
|
|
330
|
for name in names:
|
|
331
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
|
331
|
self.hook('tag', node=hex(node), tag=name, local=local)
|
|
332
|
|
|
332
|
|
|
333
|
return tagnode
|
|
333
|
return tagnode
|
|
334
|
|
|
334
|
|
|
335
|
def tag(self, names, node, message, local, user, date):
|
|
335
|
def tag(self, names, node, message, local, user, date):
|
|
336
|
'''tag a revision with one or more symbolic names.
|
|
336
|
'''tag a revision with one or more symbolic names.
|
|
337
|
|
|
337
|
|
|
338
|
names is a list of strings or, when adding a single tag, names may be a
|
|
338
|
names is a list of strings or, when adding a single tag, names may be a
|
|
339
|
string.
|
|
339
|
string.
|
|
340
|
|
|
340
|
|
|
341
|
if local is True, the tags are stored in a per-repository file.
|
|
341
|
if local is True, the tags are stored in a per-repository file.
|
|
342
|
otherwise, they are stored in the .hgtags file, and a new
|
|
342
|
otherwise, they are stored in the .hgtags file, and a new
|
|
343
|
changeset is committed with the change.
|
|
343
|
changeset is committed with the change.
|
|
344
|
|
|
344
|
|
|
345
|
keyword arguments:
|
|
345
|
keyword arguments:
|
|
346
|
|
|
346
|
|
|
347
|
local: whether to store tags in non-version-controlled file
|
|
347
|
local: whether to store tags in non-version-controlled file
|
|
348
|
(default False)
|
|
348
|
(default False)
|
|
349
|
|
|
349
|
|
|
350
|
message: commit message to use if committing
|
|
350
|
message: commit message to use if committing
|
|
351
|
|
|
351
|
|
|
352
|
user: name of user to use if committing
|
|
352
|
user: name of user to use if committing
|
|
353
|
|
|
353
|
|
|
354
|
date: date tuple to use if committing'''
|
|
354
|
date: date tuple to use if committing'''
|
|
355
|
|
|
355
|
|
|
356
|
if not local:
|
|
356
|
if not local:
|
|
357
|
for x in self.status()[:5]:
|
|
357
|
for x in self.status()[:5]:
|
|
358
|
if '.hgtags' in x:
|
|
358
|
if '.hgtags' in x:
|
|
359
|
raise util.Abort(_('working copy of .hgtags is changed '
|
|
359
|
raise util.Abort(_('working copy of .hgtags is changed '
|
|
360
|
'(please commit .hgtags manually)'))
|
|
360
|
'(please commit .hgtags manually)'))
|
|
361
|
|
|
361
|
|
|
362
|
self.tags() # instantiate the cache
|
|
362
|
self.tags() # instantiate the cache
|
|
363
|
self._tag(names, node, message, local, user, date)
|
|
363
|
self._tag(names, node, message, local, user, date)
|
|
364
|
|
|
364
|
|
|
365
|
@propertycache
|
|
365
|
@propertycache
|
|
366
|
def _tagscache(self):
|
|
366
|
def _tagscache(self):
|
|
367
|
'''Returns a tagscache object that contains various tags related caches.'''
|
|
367
|
'''Returns a tagscache object that contains various tags related caches.'''
|
|
368
|
|
|
368
|
|
|
369
|
# This simplifies its cache management by having one decorated
|
|
369
|
# This simplifies its cache management by having one decorated
|
|
370
|
# function (this one) and the rest simply fetch things from it.
|
|
370
|
# function (this one) and the rest simply fetch things from it.
|
|
371
|
class tagscache(object):
|
|
371
|
class tagscache(object):
|
|
372
|
def __init__(self):
|
|
372
|
def __init__(self):
|
|
373
|
# These two define the set of tags for this repository. tags
|
|
373
|
# These two define the set of tags for this repository. tags
|
|
374
|
# maps tag name to node; tagtypes maps tag name to 'global' or
|
|
374
|
# maps tag name to node; tagtypes maps tag name to 'global' or
|
|
375
|
# 'local'. (Global tags are defined by .hgtags across all
|
|
375
|
# 'local'. (Global tags are defined by .hgtags across all
|
|
376
|
# heads, and local tags are defined in .hg/localtags.)
|
|
376
|
# heads, and local tags are defined in .hg/localtags.)
|
|
377
|
# They constitute the in-memory cache of tags.
|
|
377
|
# They constitute the in-memory cache of tags.
|
|
378
|
self.tags = self.tagtypes = None
|
|
378
|
self.tags = self.tagtypes = None
|
|
379
|
|
|
379
|
|
|
380
|
self.nodetagscache = self.tagslist = None
|
|
380
|
self.nodetagscache = self.tagslist = None
|
|
381
|
|
|
381
|
|
|
382
|
cache = tagscache()
|
|
382
|
cache = tagscache()
|
|
383
|
cache.tags, cache.tagtypes = self._findtags()
|
|
383
|
cache.tags, cache.tagtypes = self._findtags()
|
|
384
|
|
|
384
|
|
|
385
|
return cache
|
|
385
|
return cache
|
|
386
|
|
|
386
|
|
|
387
|
def tags(self):
|
|
387
|
def tags(self):
|
|
388
|
'''return a mapping of tag to node'''
|
|
388
|
'''return a mapping of tag to node'''
|
|
389
|
return self._tagscache.tags
|
|
389
|
return self._tagscache.tags
|
|
390
|
|
|
390
|
|
|
391
|
def _findtags(self):
|
|
391
|
def _findtags(self):
|
|
392
|
'''Do the hard work of finding tags. Return a pair of dicts
|
|
392
|
'''Do the hard work of finding tags. Return a pair of dicts
|
|
393
|
(tags, tagtypes) where tags maps tag name to node, and tagtypes
|
|
393
|
(tags, tagtypes) where tags maps tag name to node, and tagtypes
|
|
394
|
maps tag name to a string like \'global\' or \'local\'.
|
|
394
|
maps tag name to a string like \'global\' or \'local\'.
|
|
395
|
Subclasses or extensions are free to add their own tags, but
|
|
395
|
Subclasses or extensions are free to add their own tags, but
|
|
396
|
should be aware that the returned dicts will be retained for the
|
|
396
|
should be aware that the returned dicts will be retained for the
|
|
397
|
duration of the localrepo object.'''
|
|
397
|
duration of the localrepo object.'''
|
|
398
|
|
|
398
|
|
|
399
|
# XXX what tagtype should subclasses/extensions use? Currently
|
|
399
|
# XXX what tagtype should subclasses/extensions use? Currently
|
|
400
|
# mq and bookmarks add tags, but do not set the tagtype at all.
|
|
400
|
# mq and bookmarks add tags, but do not set the tagtype at all.
|
|
401
|
# Should each extension invent its own tag type? Should there
|
|
401
|
# Should each extension invent its own tag type? Should there
|
|
402
|
# be one tagtype for all such "virtual" tags? Or is the status
|
|
402
|
# be one tagtype for all such "virtual" tags? Or is the status
|
|
403
|
# quo fine?
|
|
403
|
# quo fine?
|
|
404
|
|
|
404
|
|
|
405
|
alltags = {} # map tag name to (node, hist)
|
|
405
|
alltags = {} # map tag name to (node, hist)
|
|
406
|
tagtypes = {}
|
|
406
|
tagtypes = {}
|
|
407
|
|
|
407
|
|
|
408
|
tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
|
|
408
|
tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
|
|
409
|
tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
|
|
409
|
tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
|
|
410
|
|
|
410
|
|
|
411
|
# Build the return dicts. Have to re-encode tag names because
|
|
411
|
# Build the return dicts. Have to re-encode tag names because
|
|
412
|
# the tags module always uses UTF-8 (in order not to lose info
|
|
412
|
# the tags module always uses UTF-8 (in order not to lose info
|
|
413
|
# writing to the cache), but the rest of Mercurial wants them in
|
|
413
|
# writing to the cache), but the rest of Mercurial wants them in
|
|
414
|
# local encoding.
|
|
414
|
# local encoding.
|
|
415
|
tags = {}
|
|
415
|
tags = {}
|
|
416
|
for (name, (node, hist)) in alltags.iteritems():
|
|
416
|
for (name, (node, hist)) in alltags.iteritems():
|
|
417
|
if node != nullid:
|
|
417
|
if node != nullid:
|
|
418
|
try:
|
|
418
|
try:
|
|
419
|
# ignore tags to unknown nodes
|
|
419
|
# ignore tags to unknown nodes
|
|
420
|
self.changelog.lookup(node)
|
|
420
|
self.changelog.lookup(node)
|
|
421
|
tags[encoding.tolocal(name)] = node
|
|
421
|
tags[encoding.tolocal(name)] = node
|
|
422
|
except error.LookupError:
|
|
422
|
except error.LookupError:
|
|
423
|
pass
|
|
423
|
pass
|
|
424
|
tags['tip'] = self.changelog.tip()
|
|
424
|
tags['tip'] = self.changelog.tip()
|
|
425
|
tagtypes = dict([(encoding.tolocal(name), value)
|
|
425
|
tagtypes = dict([(encoding.tolocal(name), value)
|
|
426
|
for (name, value) in tagtypes.iteritems()])
|
|
426
|
for (name, value) in tagtypes.iteritems()])
|
|
427
|
return (tags, tagtypes)
|
|
427
|
return (tags, tagtypes)
|
|
428
|
|
|
428
|
|
|
429
|
def tagtype(self, tagname):
|
|
429
|
def tagtype(self, tagname):
|
|
430
|
'''
|
|
430
|
'''
|
|
431
|
return the type of the given tag. result can be:
|
|
431
|
return the type of the given tag. result can be:
|
|
432
|
|
|
432
|
|
|
433
|
'local' : a local tag
|
|
433
|
'local' : a local tag
|
|
434
|
'global' : a global tag
|
|
434
|
'global' : a global tag
|
|
435
|
None : tag does not exist
|
|
435
|
None : tag does not exist
|
|
436
|
'''
|
|
436
|
'''
|
|
437
|
|
|
437
|
|
|
438
|
return self._tagscache.tagtypes.get(tagname)
|
|
438
|
return self._tagscache.tagtypes.get(tagname)
|
|
439
|
|
|
439
|
|
|
440
|
def tagslist(self):
|
|
440
|
def tagslist(self):
|
|
441
|
'''return a list of tags ordered by revision'''
|
|
441
|
'''return a list of tags ordered by revision'''
|
|
442
|
if not self._tagscache.tagslist:
|
|
442
|
if not self._tagscache.tagslist:
|
|
443
|
l = []
|
|
443
|
l = []
|
|
444
|
for t, n in self.tags().iteritems():
|
|
444
|
for t, n in self.tags().iteritems():
|
|
445
|
r = self.changelog.rev(n)
|
|
445
|
r = self.changelog.rev(n)
|
|
446
|
l.append((r, t, n))
|
|
446
|
l.append((r, t, n))
|
|
447
|
self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
|
|
447
|
self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
|
|
448
|
|
|
448
|
|
|
449
|
return self._tagscache.tagslist
|
|
449
|
return self._tagscache.tagslist
|
|
450
|
|
|
450
|
|
|
451
|
def nodetags(self, node):
|
|
451
|
def nodetags(self, node):
|
|
452
|
'''return the tags associated with a node'''
|
|
452
|
'''return the tags associated with a node'''
|
|
453
|
if not self._tagscache.nodetagscache:
|
|
453
|
if not self._tagscache.nodetagscache:
|
|
454
|
nodetagscache = {}
|
|
454
|
nodetagscache = {}
|
|
455
|
for t, n in self.tags().iteritems():
|
|
455
|
for t, n in self.tags().iteritems():
|
|
456
|
nodetagscache.setdefault(n, []).append(t)
|
|
456
|
nodetagscache.setdefault(n, []).append(t)
|
|
457
|
for tags in nodetagscache.itervalues():
|
|
457
|
for tags in nodetagscache.itervalues():
|
|
458
|
tags.sort()
|
|
458
|
tags.sort()
|
|
459
|
self._tagscache.nodetagscache = nodetagscache
|
|
459
|
self._tagscache.nodetagscache = nodetagscache
|
|
460
|
return self._tagscache.nodetagscache.get(node, [])
|
|
460
|
return self._tagscache.nodetagscache.get(node, [])
|
|
461
|
|
|
461
|
|
|
462
|
def nodebookmarks(self, node):
|
|
462
|
def nodebookmarks(self, node):
|
|
463
|
marks = []
|
|
463
|
marks = []
|
|
464
|
for bookmark, n in self._bookmarks.iteritems():
|
|
464
|
for bookmark, n in self._bookmarks.iteritems():
|
|
465
|
if n == node:
|
|
465
|
if n == node:
|
|
466
|
marks.append(bookmark)
|
|
466
|
marks.append(bookmark)
|
|
467
|
return sorted(marks)
|
|
467
|
return sorted(marks)
|
|
468
|
|
|
468
|
|
|
469
|
def _branchtags(self, partial, lrev):
|
|
469
|
def _branchtags(self, partial, lrev):
|
|
470
|
# TODO: rename this function?
|
|
470
|
# TODO: rename this function?
|
|
471
|
tiprev = len(self) - 1
|
|
471
|
tiprev = len(self) - 1
|
|
472
|
if lrev != tiprev:
|
|
472
|
if lrev != tiprev:
|
|
473
|
ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
|
|
473
|
ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
|
|
474
|
self._updatebranchcache(partial, ctxgen)
|
|
474
|
self._updatebranchcache(partial, ctxgen)
|
|
475
|
self._writebranchcache(partial, self.changelog.tip(), tiprev)
|
|
475
|
self._writebranchcache(partial, self.changelog.tip(), tiprev)
|
|
476
|
|
|
476
|
|
|
477
|
return partial
|
|
477
|
return partial
|
|
478
|
|
|
478
|
|
|
479
|
def updatebranchcache(self):
|
|
479
|
def updatebranchcache(self):
|
|
480
|
tip = self.changelog.tip()
|
|
480
|
tip = self.changelog.tip()
|
|
481
|
if self._branchcache is not None and self._branchcachetip == tip:
|
|
481
|
if self._branchcache is not None and self._branchcachetip == tip:
|
|
482
|
return self._branchcache
|
|
482
|
return self._branchcache
|
|
483
|
|
|
483
|
|
|
484
|
oldtip = self._branchcachetip
|
|
484
|
oldtip = self._branchcachetip
|
|
485
|
self._branchcachetip = tip
|
|
485
|
self._branchcachetip = tip
|
|
486
|
if oldtip is None or oldtip not in self.changelog.nodemap:
|
|
486
|
if oldtip is None or oldtip not in self.changelog.nodemap:
|
|
487
|
partial, last, lrev = self._readbranchcache()
|
|
487
|
partial, last, lrev = self._readbranchcache()
|
|
488
|
else:
|
|
488
|
else:
|
|
489
|
lrev = self.changelog.rev(oldtip)
|
|
489
|
lrev = self.changelog.rev(oldtip)
|
|
490
|
partial = self._branchcache
|
|
490
|
partial = self._branchcache
|
|
491
|
|
|
491
|
|
|
492
|
self._branchtags(partial, lrev)
|
|
492
|
self._branchtags(partial, lrev)
|
|
493
|
# this private cache holds all heads (not just tips)
|
|
493
|
# this private cache holds all heads (not just tips)
|
|
494
|
self._branchcache = partial
|
|
494
|
self._branchcache = partial
|
|
495
|
|
|
495
|
|
|
496
|
def branchmap(self):
|
|
496
|
def branchmap(self):
|
|
497
|
'''returns a dictionary {branch: [branchheads]}'''
|
|
497
|
'''returns a dictionary {branch: [branchheads]}'''
|
|
498
|
self.updatebranchcache()
|
|
498
|
self.updatebranchcache()
|
|
499
|
return self._branchcache
|
|
499
|
return self._branchcache
|
|
500
|
|
|
500
|
|
|
501
|
def branchtags(self):
|
|
501
|
def branchtags(self):
|
|
502
|
'''return a dict where branch names map to the tipmost head of
|
|
502
|
'''return a dict where branch names map to the tipmost head of
|
|
503
|
the branch, open heads come before closed'''
|
|
503
|
the branch, open heads come before closed'''
|
|
504
|
bt = {}
|
|
504
|
bt = {}
|
|
505
|
for bn, heads in self.branchmap().iteritems():
|
|
505
|
for bn, heads in self.branchmap().iteritems():
|
|
506
|
tip = heads[-1]
|
|
506
|
tip = heads[-1]
|
|
507
|
for h in reversed(heads):
|
|
507
|
for h in reversed(heads):
|
|
508
|
if 'close' not in self.changelog.read(h)[5]:
|
|
508
|
if 'close' not in self.changelog.read(h)[5]:
|
|
509
|
tip = h
|
|
509
|
tip = h
|
|
510
|
break
|
|
510
|
break
|
|
511
|
bt[bn] = tip
|
|
511
|
bt[bn] = tip
|
|
512
|
return bt
|
|
512
|
return bt
|
|
513
|
|
|
513
|
|
|
514
|
def _readbranchcache(self):
|
|
514
|
def _readbranchcache(self):
|
|
515
|
partial = {}
|
|
515
|
partial = {}
|
|
516
|
try:
|
|
516
|
try:
|
|
517
|
f = self.opener("cache/branchheads")
|
|
517
|
f = self.opener("cache/branchheads")
|
|
518
|
lines = f.read().split('\n')
|
|
518
|
lines = f.read().split('\n')
|
|
519
|
f.close()
|
|
519
|
f.close()
|
|
520
|
except (IOError, OSError):
|
|
520
|
except (IOError, OSError):
|
|
521
|
return {}, nullid, nullrev
|
|
521
|
return {}, nullid, nullrev
|
|
522
|
|
|
522
|
|
|
523
|
try:
|
|
523
|
try:
|
|
524
|
last, lrev = lines.pop(0).split(" ", 1)
|
|
524
|
last, lrev = lines.pop(0).split(" ", 1)
|
|
525
|
last, lrev = bin(last), int(lrev)
|
|
525
|
last, lrev = bin(last), int(lrev)
|
|
526
|
if lrev >= len(self) or self[lrev].node() != last:
|
|
526
|
if lrev >= len(self) or self[lrev].node() != last:
|
|
527
|
# invalidate the cache
|
|
527
|
# invalidate the cache
|
|
528
|
raise ValueError('invalidating branch cache (tip differs)')
|
|
528
|
raise ValueError('invalidating branch cache (tip differs)')
|
|
529
|
for l in lines:
|
|
529
|
for l in lines:
|
|
530
|
if not l:
|
|
530
|
if not l:
|
|
531
|
continue
|
|
531
|
continue
|
|
532
|
node, label = l.split(" ", 1)
|
|
532
|
node, label = l.split(" ", 1)
|
|
533
|
label = encoding.tolocal(label.strip())
|
|
533
|
label = encoding.tolocal(label.strip())
|
|
534
|
partial.setdefault(label, []).append(bin(node))
|
|
534
|
partial.setdefault(label, []).append(bin(node))
|
|
535
|
except KeyboardInterrupt:
|
|
535
|
except KeyboardInterrupt:
|
|
536
|
raise
|
|
536
|
raise
|
|
537
|
except Exception, inst:
|
|
537
|
except Exception, inst:
|
|
538
|
if self.ui.debugflag:
|
|
538
|
if self.ui.debugflag:
|
|
539
|
self.ui.warn(str(inst), '\n')
|
|
539
|
self.ui.warn(str(inst), '\n')
|
|
540
|
partial, last, lrev = {}, nullid, nullrev
|
|
540
|
partial, last, lrev = {}, nullid, nullrev
|
|
541
|
return partial, last, lrev
|
|
541
|
return partial, last, lrev
|
|
542
|
|
|
542
|
|
|
543
|
def _writebranchcache(self, branches, tip, tiprev):
|
|
543
|
def _writebranchcache(self, branches, tip, tiprev):
|
|
544
|
try:
|
|
544
|
try:
|
|
545
|
f = self.opener("cache/branchheads", "w", atomictemp=True)
|
|
545
|
f = self.opener("cache/branchheads", "w", atomictemp=True)
|
|
546
|
f.write("%s %s\n" % (hex(tip), tiprev))
|
|
546
|
f.write("%s %s\n" % (hex(tip), tiprev))
|
|
547
|
for label, nodes in branches.iteritems():
|
|
547
|
for label, nodes in branches.iteritems():
|
|
548
|
for node in nodes:
|
|
548
|
for node in nodes:
|
|
549
|
f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
|
|
549
|
f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
|
|
550
|
f.close()
|
|
550
|
f.close()
|
|
551
|
except (IOError, OSError):
|
|
551
|
except (IOError, OSError):
|
|
552
|
pass
|
|
552
|
pass
|
|
553
|
|
|
553
|
|
|
554
|
def _updatebranchcache(self, partial, ctxgen):
|
|
554
|
def _updatebranchcache(self, partial, ctxgen):
|
|
555
|
# collect new branch entries
|
|
555
|
# collect new branch entries
|
|
556
|
newbranches = {}
|
|
556
|
newbranches = {}
|
|
557
|
for c in ctxgen:
|
|
557
|
for c in ctxgen:
|
|
558
|
newbranches.setdefault(c.branch(), []).append(c.node())
|
|
558
|
newbranches.setdefault(c.branch(), []).append(c.node())
|
|
559
|
# if older branchheads are reachable from new ones, they aren't
|
|
559
|
# if older branchheads are reachable from new ones, they aren't
|
|
560
|
# really branchheads. Note checking parents is insufficient:
|
|
560
|
# really branchheads. Note checking parents is insufficient:
|
|
561
|
# 1 (branch a) -> 2 (branch b) -> 3 (branch a)
|
|
561
|
# 1 (branch a) -> 2 (branch b) -> 3 (branch a)
|
|
562
|
for branch, newnodes in newbranches.iteritems():
|
|
562
|
for branch, newnodes in newbranches.iteritems():
|
|
563
|
bheads = partial.setdefault(branch, [])
|
|
563
|
bheads = partial.setdefault(branch, [])
|
|
564
|
bheads.extend(newnodes)
|
|
564
|
bheads.extend(newnodes)
|
|
565
|
if len(bheads) <= 1:
|
|
565
|
if len(bheads) <= 1:
|
|
566
|
continue
|
|
566
|
continue
|
|
567
|
bheads = sorted(bheads, key=lambda x: self[x].rev())
|
|
567
|
bheads = sorted(bheads, key=lambda x: self[x].rev())
|
|
568
|
# starting from tip means fewer passes over reachable
|
|
568
|
# starting from tip means fewer passes over reachable
|
|
569
|
while newnodes:
|
|
569
|
while newnodes:
|
|
570
|
latest = newnodes.pop()
|
|
570
|
latest = newnodes.pop()
|
|
571
|
if latest not in bheads:
|
|
571
|
if latest not in bheads:
|
|
572
|
continue
|
|
572
|
continue
|
|
573
|
minbhrev = self[bheads[0]].node()
|
|
573
|
minbhrev = self[bheads[0]].node()
|
|
574
|
reachable = self.changelog.reachable(latest, minbhrev)
|
|
574
|
reachable = self.changelog.reachable(latest, minbhrev)
|
|
575
|
reachable.remove(latest)
|
|
575
|
reachable.remove(latest)
|
|
576
|
if reachable:
|
|
576
|
if reachable:
|
|
577
|
bheads = [b for b in bheads if b not in reachable]
|
|
577
|
bheads = [b for b in bheads if b not in reachable]
|
|
578
|
partial[branch] = bheads
|
|
578
|
partial[branch] = bheads
|
|
579
|
|
|
579
|
|
|
580
|
def lookup(self, key):
|
|
580
|
def lookup(self, key):
|
|
581
|
if isinstance(key, int):
|
|
581
|
if isinstance(key, int):
|
|
582
|
return self.changelog.node(key)
|
|
582
|
return self.changelog.node(key)
|
|
583
|
elif key == '.':
|
|
583
|
elif key == '.':
|
|
584
|
return self.dirstate.p1()
|
|
584
|
return self.dirstate.p1()
|
|
585
|
elif key == 'null':
|
|
585
|
elif key == 'null':
|
|
586
|
return nullid
|
|
586
|
return nullid
|
|
587
|
elif key == 'tip':
|
|
587
|
elif key == 'tip':
|
|
588
|
return self.changelog.tip()
|
|
588
|
return self.changelog.tip()
|
|
589
|
n = self.changelog._match(key)
|
|
589
|
n = self.changelog._match(key)
|
|
590
|
if n:
|
|
590
|
if n:
|
|
591
|
return n
|
|
591
|
return n
|
|
592
|
if key in self._bookmarks:
|
|
592
|
if key in self._bookmarks:
|
|
593
|
return self._bookmarks[key]
|
|
593
|
return self._bookmarks[key]
|
|
594
|
if key in self.tags():
|
|
594
|
if key in self.tags():
|
|
595
|
return self.tags()[key]
|
|
595
|
return self.tags()[key]
|
|
596
|
if key in self.branchtags():
|
|
596
|
if key in self.branchtags():
|
|
597
|
return self.branchtags()[key]
|
|
597
|
return self.branchtags()[key]
|
|
598
|
n = self.changelog._partialmatch(key)
|
|
598
|
n = self.changelog._partialmatch(key)
|
|
599
|
if n:
|
|
599
|
if n:
|
|
600
|
return n
|
|
600
|
return n
|
|
601
|
|
|
601
|
|
|
602
|
# can't find key, check if it might have come from damaged dirstate
|
|
602
|
# can't find key, check if it might have come from damaged dirstate
|
|
603
|
if key in self.dirstate.parents():
|
|
603
|
if key in self.dirstate.parents():
|
|
604
|
raise error.Abort(_("working directory has unknown parent '%s'!")
|
|
604
|
raise error.Abort(_("working directory has unknown parent '%s'!")
|
|
605
|
% short(key))
|
|
605
|
% short(key))
|
|
606
|
try:
|
|
606
|
try:
|
|
607
|
if len(key) == 20:
|
|
607
|
if len(key) == 20:
|
|
608
|
key = hex(key)
|
|
608
|
key = hex(key)
|
|
609
|
except TypeError:
|
|
609
|
except TypeError:
|
|
610
|
pass
|
|
610
|
pass
|
|
611
|
raise error.RepoLookupError(_("unknown revision '%s'") % key)
|
|
611
|
raise error.RepoLookupError(_("unknown revision '%s'") % key)
|
|
612
|
|
|
612
|
|
|
613
|
def lookupbranch(self, key, remote=None):
|
|
613
|
def lookupbranch(self, key, remote=None):
|
|
614
|
repo = remote or self
|
|
614
|
repo = remote or self
|
|
615
|
if key in repo.branchmap():
|
|
615
|
if key in repo.branchmap():
|
|
616
|
return key
|
|
616
|
return key
|
|
617
|
|
|
617
|
|
|
618
|
repo = (remote and remote.local()) and remote or self
|
|
618
|
repo = (remote and remote.local()) and remote or self
|
|
619
|
return repo[key].branch()
|
|
619
|
return repo[key].branch()
|
|
620
|
|
|
620
|
|
|
621
|
def known(self, nodes):
|
|
621
|
def known(self, nodes):
|
|
622
|
nm = self.changelog.nodemap
|
|
622
|
nm = self.changelog.nodemap
|
|
623
|
return [(n in nm) for n in nodes]
|
|
623
|
return [(n in nm) for n in nodes]
|
|
624
|
|
|
624
|
|
|
625
|
def local(self):
|
|
625
|
def local(self):
|
|
626
|
return self
|
|
626
|
return self
|
|
627
|
|
|
627
|
|
|
628
|
def join(self, f):
|
|
628
|
def join(self, f):
|
|
629
|
return os.path.join(self.path, f)
|
|
629
|
return os.path.join(self.path, f)
|
|
630
|
|
|
630
|
|
|
631
|
def wjoin(self, f):
|
|
631
|
def wjoin(self, f):
|
|
632
|
return os.path.join(self.root, f)
|
|
632
|
return os.path.join(self.root, f)
|
|
633
|
|
|
633
|
|
|
634
|
def file(self, f):
|
|
634
|
def file(self, f):
|
|
635
|
if f[0] == '/':
|
|
635
|
if f[0] == '/':
|
|
636
|
f = f[1:]
|
|
636
|
f = f[1:]
|
|
637
|
return filelog.filelog(self.sopener, f)
|
|
637
|
return filelog.filelog(self.sopener, f)
|
|
638
|
|
|
638
|
|
|
639
|
def changectx(self, changeid):
|
|
639
|
def changectx(self, changeid):
|
|
640
|
return self[changeid]
|
|
640
|
return self[changeid]
|
|
641
|
|
|
641
|
|
|
642
|
def parents(self, changeid=None):
|
|
642
|
def parents(self, changeid=None):
|
|
643
|
'''get list of changectxs for parents of changeid'''
|
|
643
|
'''get list of changectxs for parents of changeid'''
|
|
644
|
return self[changeid].parents()
|
|
644
|
return self[changeid].parents()
|
|
645
|
|
|
645
|
|
|
646
|
def filectx(self, path, changeid=None, fileid=None):
|
|
646
|
def filectx(self, path, changeid=None, fileid=None):
|
|
647
|
"""changeid can be a changeset revision, node, or tag.
|
|
647
|
"""changeid can be a changeset revision, node, or tag.
|
|
648
|
fileid can be a file revision or node."""
|
|
648
|
fileid can be a file revision or node."""
|
|
649
|
return context.filectx(self, path, changeid, fileid)
|
|
649
|
return context.filectx(self, path, changeid, fileid)
|
|
650
|
|
|
650
|
|
|
651
|
def getcwd(self):
|
|
651
|
def getcwd(self):
|
|
652
|
return self.dirstate.getcwd()
|
|
652
|
return self.dirstate.getcwd()
|
|
653
|
|
|
653
|
|
|
654
|
def pathto(self, f, cwd=None):
|
|
654
|
def pathto(self, f, cwd=None):
|
|
655
|
return self.dirstate.pathto(f, cwd)
|
|
655
|
return self.dirstate.pathto(f, cwd)
|
|
656
|
|
|
656
|
|
|
657
|
def wfile(self, f, mode='r'):
|
|
657
|
def wfile(self, f, mode='r'):
|
|
658
|
return self.wopener(f, mode)
|
|
658
|
return self.wopener(f, mode)
|
|
659
|
|
|
659
|
|
|
660
|
def _link(self, f):
|
|
660
|
def _link(self, f):
|
|
661
|
return os.path.islink(self.wjoin(f))
|
|
661
|
return os.path.islink(self.wjoin(f))
|
|
662
|
|
|
662
|
|
|
663
|
def _loadfilter(self, filter):
|
|
663
|
def _loadfilter(self, filter):
|
|
664
|
if filter not in self.filterpats:
|
|
664
|
if filter not in self.filterpats:
|
|
665
|
l = []
|
|
665
|
l = []
|
|
666
|
for pat, cmd in self.ui.configitems(filter):
|
|
666
|
for pat, cmd in self.ui.configitems(filter):
|
|
667
|
if cmd == '!':
|
|
667
|
if cmd == '!':
|
|
668
|
continue
|
|
668
|
continue
|
|
669
|
mf = matchmod.match(self.root, '', [pat])
|
|
669
|
mf = matchmod.match(self.root, '', [pat])
|
|
670
|
fn = None
|
|
670
|
fn = None
|
|
671
|
params = cmd
|
|
671
|
params = cmd
|
|
672
|
for name, filterfn in self._datafilters.iteritems():
|
|
672
|
for name, filterfn in self._datafilters.iteritems():
|
|
673
|
if cmd.startswith(name):
|
|
673
|
if cmd.startswith(name):
|
|
674
|
fn = filterfn
|
|
674
|
fn = filterfn
|
|
675
|
params = cmd[len(name):].lstrip()
|
|
675
|
params = cmd[len(name):].lstrip()
|
|
676
|
break
|
|
676
|
break
|
|
677
|
if not fn:
|
|
677
|
if not fn:
|
|
678
|
fn = lambda s, c, **kwargs: util.filter(s, c)
|
|
678
|
fn = lambda s, c, **kwargs: util.filter(s, c)
|
|
679
|
# Wrap old filters not supporting keyword arguments
|
|
679
|
# Wrap old filters not supporting keyword arguments
|
|
680
|
if not inspect.getargspec(fn)[2]:
|
|
680
|
if not inspect.getargspec(fn)[2]:
|
|
681
|
oldfn = fn
|
|
681
|
oldfn = fn
|
|
682
|
fn = lambda s, c, **kwargs: oldfn(s, c)
|
|
682
|
fn = lambda s, c, **kwargs: oldfn(s, c)
|
|
683
|
l.append((mf, fn, params))
|
|
683
|
l.append((mf, fn, params))
|
|
684
|
self.filterpats[filter] = l
|
|
684
|
self.filterpats[filter] = l
|
|
685
|
return self.filterpats[filter]
|
|
685
|
return self.filterpats[filter]
|
|
686
|
|
|
686
|
|
|
687
|
def _filter(self, filterpats, filename, data):
|
|
687
|
def _filter(self, filterpats, filename, data):
|
|
688
|
for mf, fn, cmd in filterpats:
|
|
688
|
for mf, fn, cmd in filterpats:
|
|
689
|
if mf(filename):
|
|
689
|
if mf(filename):
|
|
690
|
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
|
|
690
|
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
|
|
691
|
data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
|
|
691
|
data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
|
|
692
|
break
|
|
692
|
break
|
|
693
|
|
|
693
|
|
|
694
|
return data
|
|
694
|
return data
|
|
695
|
|
|
695
|
|
|
696
|
@propertycache
|
|
696
|
@propertycache
|
|
697
|
def _encodefilterpats(self):
|
|
697
|
def _encodefilterpats(self):
|
|
698
|
return self._loadfilter('encode')
|
|
698
|
return self._loadfilter('encode')
|
|
699
|
|
|
699
|
|
|
700
|
@propertycache
|
|
700
|
@propertycache
|
|
701
|
def _decodefilterpats(self):
|
|
701
|
def _decodefilterpats(self):
|
|
702
|
return self._loadfilter('decode')
|
|
702
|
return self._loadfilter('decode')
|
|
703
|
|
|
703
|
|
|
704
|
def adddatafilter(self, name, filter):
|
|
704
|
def adddatafilter(self, name, filter):
|
|
705
|
self._datafilters[name] = filter
|
|
705
|
self._datafilters[name] = filter
|
|
706
|
|
|
706
|
|
|
707
|
def wread(self, filename):
|
|
707
|
def wread(self, filename):
|
|
708
|
if self._link(filename):
|
|
708
|
if self._link(filename):
|
|
709
|
data = os.readlink(self.wjoin(filename))
|
|
709
|
data = os.readlink(self.wjoin(filename))
|
|
710
|
else:
|
|
710
|
else:
|
|
711
|
data = self.wopener.read(filename)
|
|
711
|
data = self.wopener.read(filename)
|
|
712
|
return self._filter(self._encodefilterpats, filename, data)
|
|
712
|
return self._filter(self._encodefilterpats, filename, data)
|
|
713
|
|
|
713
|
|
|
714
|
def wwrite(self, filename, data, flags):
|
|
714
|
def wwrite(self, filename, data, flags):
|
|
715
|
data = self._filter(self._decodefilterpats, filename, data)
|
|
715
|
data = self._filter(self._decodefilterpats, filename, data)
|
|
716
|
if 'l' in flags:
|
|
716
|
if 'l' in flags:
|
|
717
|
self.wopener.symlink(data, filename)
|
|
717
|
self.wopener.symlink(data, filename)
|
|
718
|
else:
|
|
718
|
else:
|
|
719
|
self.wopener.write(filename, data)
|
|
719
|
self.wopener.write(filename, data)
|
|
720
|
if 'x' in flags:
|
|
720
|
if 'x' in flags:
|
|
721
|
util.setflags(self.wjoin(filename), False, True)
|
|
721
|
util.setflags(self.wjoin(filename), False, True)
|
|
722
|
|
|
722
|
|
|
723
|
def wwritedata(self, filename, data):
|
|
723
|
def wwritedata(self, filename, data):
|
|
724
|
return self._filter(self._decodefilterpats, filename, data)
|
|
724
|
return self._filter(self._decodefilterpats, filename, data)
|
|
725
|
|
|
725
|
|
|
726
|
def transaction(self, desc):
|
|
726
|
def transaction(self, desc):
|
|
727
|
tr = self._transref and self._transref() or None
|
|
727
|
tr = self._transref and self._transref() or None
|
|
728
|
if tr and tr.running():
|
|
728
|
if tr and tr.running():
|
|
729
|
return tr.nest()
|
|
729
|
return tr.nest()
|
|
730
|
|
|
730
|
|
|
731
|
# abort here if the journal already exists
|
|
731
|
# abort here if the journal already exists
|
|
732
|
if os.path.exists(self.sjoin("journal")):
|
|
732
|
if os.path.exists(self.sjoin("journal")):
|
|
733
|
raise error.RepoError(
|
|
733
|
raise error.RepoError(
|
|
734
|
_("abandoned transaction found - run hg recover"))
|
|
734
|
_("abandoned transaction found - run hg recover"))
|
|
735
|
|
|
735
|
|
|
736
|
journalfiles = self._writejournal(desc)
|
|
736
|
journalfiles = self._writejournal(desc)
|
|
737
|
renames = [(x, undoname(x)) for x in journalfiles]
|
|
737
|
renames = [(x, undoname(x)) for x in journalfiles]
|
|
738
|
|
|
738
|
|
|
739
|
tr = transaction.transaction(self.ui.warn, self.sopener,
|
|
739
|
tr = transaction.transaction(self.ui.warn, self.sopener,
|
|
740
|
self.sjoin("journal"),
|
|
740
|
self.sjoin("journal"),
|
|
741
|
aftertrans(renames),
|
|
741
|
aftertrans(renames),
|
|
742
|
self.store.createmode)
|
|
742
|
self.store.createmode)
|
|
743
|
self._transref = weakref.ref(tr)
|
|
743
|
self._transref = weakref.ref(tr)
|
|
744
|
return tr
|
|
744
|
return tr
|
|
745
|
|
|
745
|
|
|
746
|
def _writejournal(self, desc):
|
|
746
|
def _writejournal(self, desc):
|
|
747
|
# save dirstate for rollback
|
|
747
|
# save dirstate for rollback
|
|
748
|
try:
|
|
748
|
try:
|
|
749
|
ds = self.opener.read("dirstate")
|
|
749
|
ds = self.opener.read("dirstate")
|
|
750
|
except IOError:
|
|
750
|
except IOError:
|
|
751
|
ds = ""
|
|
751
|
ds = ""
|
|
752
|
self.opener.write("journal.dirstate", ds)
|
|
752
|
self.opener.write("journal.dirstate", ds)
|
|
753
|
self.opener.write("journal.branch",
|
|
753
|
self.opener.write("journal.branch",
|
|
754
|
encoding.fromlocal(self.dirstate.branch()))
|
|
754
|
encoding.fromlocal(self.dirstate.branch()))
|
|
755
|
self.opener.write("journal.desc",
|
|
755
|
self.opener.write("journal.desc",
|
|
756
|
"%d\n%s\n" % (len(self), desc))
|
|
756
|
"%d\n%s\n" % (len(self), desc))
|
|
757
|
|
|
757
|
|
|
758
|
bkname = self.join('bookmarks')
|
|
758
|
bkname = self.join('bookmarks')
|
|
759
|
if os.path.exists(bkname):
|
|
759
|
if os.path.exists(bkname):
|
|
760
|
util.copyfile(bkname, self.join('journal.bookmarks'))
|
|
760
|
util.copyfile(bkname, self.join('journal.bookmarks'))
|
|
761
|
else:
|
|
761
|
else:
|
|
762
|
self.opener.write('journal.bookmarks', '')
|
|
762
|
self.opener.write('journal.bookmarks', '')
|
|
763
|
phasesname = self.sjoin('phaseroots')
|
|
763
|
phasesname = self.sjoin('phaseroots')
|
|
764
|
if os.path.exists(phasesname):
|
|
764
|
if os.path.exists(phasesname):
|
|
765
|
util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
|
|
765
|
util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
|
|
766
|
else:
|
|
766
|
else:
|
|
767
|
self.sopener.write('journal.phaseroots', '')
|
|
767
|
self.sopener.write('journal.phaseroots', '')
|
|
768
|
|
|
768
|
|
|
769
|
return (self.sjoin('journal'), self.join('journal.dirstate'),
|
|
769
|
return (self.sjoin('journal'), self.join('journal.dirstate'),
|
|
770
|
self.join('journal.branch'), self.join('journal.desc'),
|
|
770
|
self.join('journal.branch'), self.join('journal.desc'),
|
|
771
|
self.join('journal.bookmarks'),
|
|
771
|
self.join('journal.bookmarks'),
|
|
772
|
self.sjoin('journal.phaseroots'))
|
|
772
|
self.sjoin('journal.phaseroots'))
|
|
773
|
|
|
773
|
|
|
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0

    def invalidatecaches(self):
        try:
            delattr(self, '_tagscache')
        except AttributeError:
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        try:
            delattr(self, 'dirstate')
        except AttributeError:
            pass

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

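    # Note on the retry path above: when the lock is already held and wait
    # is True, the second lock.lock() call waits up to ui.timeout seconds
    # (600 by default) before giving up. The wait can be tuned from an
    # hgrc, e.g.:
    #
    #   [ui]
    #   timeout = 30
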
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

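    # Illustrative sketch (hypothetical caller, not in the original
    # source): the store lock is typically paired with a transaction,
    # mirroring what commitctx() does further down:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...            # write to the store
    #           tr.close()
    #       finally:
    #           tr.release()   # no-op if closed, rolls back otherwise
    #   finally:
    #       lock.release()
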
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

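    # Sketch of the conventional ordering (for illustration): callers
    # that need both locks take wlock before lock, as rollback() does
    # above, so every code path acquires them in the same order and
    # cannot deadlock:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
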
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
                elif '.hgsub' in changes[2]:
                    # clean up .hgsubstate when .hgsub is removed
                    if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                        changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

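    # Minimal usage sketch (hypothetical caller): committing all working
    # directory changes with an explicit message and user. commit()
    # returns None when there is nothing to commit:
    #
    #   node = repo.commit(text='fix frobnication', user='me@example.com')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
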
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # ensure the new commit is 1-phase
            phases.retractboundary(self, 1, [n])
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

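    # Note on the phase step above (added in this revision of the file):
    # retractboundary(self, 1, [n]) makes sure the new changeset is at
    # least phase 1 (draft), so fresh commits start out non-public until
    # they are exchanged.
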
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

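    # Usage sketch (hypothetical caller): the seven lists come back in a
    # fixed order, so callers usually unpack them positionally:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
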
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

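    # What the loop above computes, by example: walking first parents
    # from top towards bottom, it records the nodes reached after 1, 2,
    # 4, 8, ... steps (a node is appended when the step counter i reaches
    # f, which then doubles). This exponential sampling is what the
    # legacy "between" wire protocol command expects.
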
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

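    # Illustrative call sketch (assumes the mercurial.hg module is
    # available as hg, which this file does not import): pulling from a
    # peer repository object, roughly as the pull command would after
    # resolving the source path:
    #
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   repo.pull(other, heads=None, force=False)
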
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

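    # Sketch of an extension overriding this hook (all names invented for
    # illustration; the reposetup/subclass pattern is the usual one):
    #
    #   def reposetup(ui, repo):
    #       class vetorepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(vetorepo, self).checkpush(force, revs)
    #               if not force and self.ui.configbool('veto', 'nopush'):
    #                   raise util.Abort(_('pushing is disabled here'))
    #       repo.__class__ = vetorepo
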
1533
|
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

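    # Decoding the push() return value described in the docstring above;
    # a sketch, not code from this module:
    #
    #   ret = repo.push(other, newbranch=True)
    #   if ret == 0:
    #       repo.ui.warn('push failed or there was nothing to push\n')
    #   elif ret > 1:
    #       # per addchangegroup(): 1 + number of new remote heads
    #       repo.ui.status('push added %d remote heads\n' % (ret - 1))
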
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

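    # On the serving side of a pull, getbundle() above produces the
    # changegroup: 'common' is what the client already has, 'heads' is
    # what it asked for. A sketch with illustrative variable names:
    #
    #   cg = repo.getbundle('serve', heads=wantedheads, common=clientcommon)
    #   if cg is None:
    #       pass  # the client already has everything it asked for
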
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

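    # The 'bundle.reorder' knob read above defaults to 'auto' (mapped to
    # None so the revlog picks the order); any other value goes through
    # util.parsebool. For example, in an hgrc:
    #
    #   [bundle]
    #   reorder = false
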
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

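    # Decoding the addchangegroup() return value per its docstring: it is
    # never 0 on success, and its sign tracks the head count change.
    # Illustrative sketch only:
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret > 1:
    #       added = ret - 1      # new heads appeared
    #   elif ret < 0:
    #       removed = -ret - 1   # heads went away
    #   # ret == 1 means the head count is unchanged
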
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

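    # The wire format consumed by stream_in() above, as the parsing code
    # implies: one status line holding an integer response code, one
    # '<total files> <total bytes>' line, then for each file a
    # '<name>\0<size>' header followed by exactly <size> bytes of raw
    # store data.
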
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

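    # The 'streamreqs' capability tested above is a comma-separated list
    # of the remote's format requirements, e.g. 'revlogv1,generaldelta';
    # streaming is only attempted when every listed requirement is in
    # self.supportedformats.
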
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

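    # pushkey() above is the generic entry point; bookmarks are one
    # concrete namespace, as the bookmark sync in push() shows. A sketch
    # with illustrative hex node strings:
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)
    #   if not ok:
    #       repo.ui.warn('bookmark move was refused\n')
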
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

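    # savecommitmessage() above returns the written path made relative by
    # self.pathto(), typically something like '.hg/last-message.txt'
    # (illustrative; the exact string depends on the current directory).
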
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

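# A sketch of the shape aftertrans() expects: the (src, dest) rename
# pairs are captured eagerly as plain tuples, and the returned closure
# performs the renames when invoked, e.g. once a transaction closes:
#
#   a = aftertrans([('journal', 'undo'), ('journal.branch', 'undo.branch')])
#   a()  # renames journal -> undo and journal.branch -> undo.branch
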
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

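# For example, undoname('.hg/journal.dirstate') yields
# '.hg/undo.dirstate'; only the first 'journal' occurrence is replaced.
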
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True