Revert the requirement that pipes have been opened by winpty_spawn time.
The immediate problem is that the implementation has a race condition: the agent services the control pipe before the other pipes, so it can see the winpty_spawn RPC request before noticing that the I/O pipes are connected. (This situation actually happened and caused a pty4j test to fail.)

There are a few ways to fix the race, such as adding special calls to service the I/O pipes in handleStartProcessPacket. It occurred to me, though, that ensuring the pipes are connected before calling winpty_spawn might be difficult in environments that provide high-level I/O systems. I'm specifically thinking of nodejs/libuv, where, IIRC, it was difficult to guarantee that the CreateFile API would be called synchronously.

It turns out to be easy to relax the restriction anyway, so just do that. I also suspect that CONIN and CONOUT/CONERR are sufficiently different that CONIN, at least, should have been exempted from the requirement in the first place.
parent 139a4dfe53
commit 71405ba1d4
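For orientation, this is the client-side call ordering the change permits, as a minimal sketch against the public libwinpty API (error handling elided; the helper name and the cmd.exe command line are illustrative, not part of the commit):

#include <windows.h>
#include <winpty.h>

// Sketch only: spawn first, open the I/O pipes afterwards. Before this
// commit the agent replied PipesStillOpen and libwinpty threw; after it,
// the agent starts the child and lets the pipes connect later.
static void spawnThenConnect(winpty_t *wp) {
    winpty_spawn_config_t *cfg = winpty_spawn_config_new(
        WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN, nullptr /*appname*/,
        L"cmd.exe", nullptr /*cwd*/, nullptr /*env*/, nullptr /*err*/);
    HANDLE process = nullptr;
    winpty_spawn(wp, cfg, &process, nullptr, nullptr, nullptr);
    winpty_spawn_config_free(cfg);

    // Only now connect CONIN and CONOUT; previously this had to happen
    // before the winpty_spawn call above.
    HANDLE conin = CreateFileW(winpty_conin_name(wp), GENERIC_WRITE, 0,
                               nullptr, OPEN_EXISTING, 0, nullptr);
    HANDLE conout = CreateFileW(winpty_conout_name(wp), GENERIC_READ, 0,
                                nullptr, OPEN_EXISTING, 0, nullptr);
    // ... read from conout / write to conin, then CloseHandle all three ...
}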
src/agent/Agent.cc
@@ -333,34 +333,6 @@ void Agent::handleStartProcessPacket(ReadBuffer &packet)
     const auto desktop = packet.getWString();
     packet.assertEof();

-    // Ensure that all I/O pipes are connected. At least the output pipes
-    // must be connected eventually, or data will back up (and eventually, if
-    // it's ever implemented, the console may become frozen indefinitely).
-    // Connecting the output pipes late is racy if auto-shutdown is enabled,
-    // because the pipe could be closed before it's opened.
-    //
-    // Return a friendly error back to libwinpty for the sake of programmers
-    // integrating with winpty.
-    {
-        std::wstring pipeList;
-        for (NamedPipe *pipe : { m_coninPipe, m_conoutPipe, m_conerrPipe }) {
-            if (pipe != nullptr && pipe->isConnecting()) {
-                if (!pipeList.empty()) {
-                    pipeList.append(L", ");
-                }
-                pipeList.append(pipe->name());
-            }
-        }
-        if (!pipeList.empty()) {
-            auto reply = newPacket();
-            reply.putInt32(
-                static_cast<int32_t>(StartProcessResult::PipesStillOpen));
-            reply.putWString(pipeList);
-            writePacket(reply);
-            return;
-        }
-    }
-
     auto cmdlineV = vectorWithNulFromString(cmdline);
     auto desktopV = vectorWithNulFromString(desktop);
     auto envV = vectorFromString(env);
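The block removed above is the racy check: the agent services the control pipe ahead of the I/O pipes, so a CONOUT connect completion can still be unobserved when the spawn RPC is handled. A toy model of that servicing order (hypothetical throughout; not the agent's real event loop):

#include <cstdio>

// Both completions are already pending when the pass begins, but the
// control pipe is serviced first, so the spawn request is handled while
// CONOUT still *looks* like it is connecting.
struct ToyPipe { const char *name; bool completionPending; bool connecting; };

int main() {
    ToyPipe control = {"control", true, false};
    ToyPipe conout  = {"conout",  true, true};
    ToyPipe *serviceOrder[] = { &control, &conout };  // control first

    for (ToyPipe *p : serviceOrder) {
        if (!p->completionPending) continue;
        p->completionPending = false;
        if (p == &conout) {
            p->connecting = false;  // connect completion observed only here
        } else {
            // handleStartProcessPacket's old check would fire here:
            std::printf("spawn RPC seen; conout connecting=%d\n",
                        conout.connecting);  // prints 1
        }
    }
    return 0;
}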
@@ -485,13 +457,16 @@ void Agent::onPollTimeout()
 void Agent::autoClosePipesForShutdown()
 {
     if (m_closingOutputPipes) {
-        if (!m_conoutPipe->isClosed() &&
+        // We don't want to close a pipe before it's connected! If we do, the
+        // libwinpty client may try to connect to a non-existent pipe. This
+        // case is important for short-lived programs.
+        if (m_conoutPipe->isConnected() &&
                 m_conoutPipe->bytesToSend() == 0) {
             trace("Closing CONOUT pipe (auto-shutdown)");
             m_conoutPipe->closePipe();
         }
         if (m_conerrPipe != nullptr &&
-                !m_conerrPipe->isClosed() &&
+                m_conerrPipe->isConnected() &&
                 m_conerrPipe->bytesToSend() == 0) {
             trace("Closing CONERR pipe (auto-shutdown)");
             m_conerrPipe->closePipe();
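The switch from !isClosed() to isConnected() matters because closing a listening pipe that no client has ever opened makes the pipe name vanish; the libwinpty client's later CreateFileW then fails outright. A standalone Win32 sketch of that failure mode (the pipe name is hypothetical):

#include <windows.h>
#include <cstdio>

// Demonstrates what the isConnected() guard prevents: the server closes its
// listening handle before any client connects, so the name disappears and a
// subsequent client open fails (typically ERROR_FILE_NOT_FOUND).
int main() {
    const wchar_t *name = L"\\\\.\\pipe\\demo-close-before-connect";
    HANDLE server = CreateNamedPipeW(
        name, PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE,
        1, 4096, 4096, 0, nullptr);
    CloseHandle(server);  // "auto-shutdown" before anyone connected

    HANDLE client = CreateFileW(name, GENERIC_READ | GENERIC_WRITE, 0,
                                nullptr, OPEN_EXISTING, 0, nullptr);
    if (client == INVALID_HANDLE_VALUE) {
        std::printf("client open failed, error=%lu\n", GetLastError());
    } else {
        CloseHandle(client);
    }
    return 0;
}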
src/agent/NamedPipe.cc
@@ -167,6 +167,7 @@ void NamedPipe::InputWorker::completeIo(DWORD size)
 bool NamedPipe::InputWorker::shouldIssueIo(DWORD *size, bool *isRead)
 {
     *isRead = true;
+    ASSERT(!m_namedPipe.isConnecting());
     if (m_namedPipe.isClosed()) {
         return false;
     } else if (m_namedPipe.m_inQueue.size() < m_namedPipe.readBufferSize()) {
src/agent/NamedPipe.h
@@ -105,6 +105,7 @@ public:
     std::string readAllToString();
     void closePipe();
     bool isClosed() { return m_handle == nullptr; }
+    bool isConnected() { return !isClosed() && !isConnecting(); }
     bool isConnecting() { return m_connectEvent.get() != nullptr; }

 private:
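Taken together, the three inline predicates above partition a pipe's lifetime into mutually exclusive states: connecting, connected, closed. A simplified standalone model (member names borrowed from the hunk; the rest is illustrative, not winpty code):

#include <cassert>

// Lifecycle implied by the header: a server pipe starts with a handle and a
// pending connect event (connecting), loses the event once a client attaches
// (connected), and loses the handle on closePipe() (closed).
struct PipeModel {
    void *m_handle = nullptr;
    void *m_connectEvent = nullptr;
    bool isClosed() const     { return m_handle == nullptr; }
    bool isConnecting() const { return m_connectEvent != nullptr; }
    bool isConnected() const  { return !isClosed() && !isConnecting(); }
};

int main() {
    int h = 0, ev = 0;
    PipeModel p;
    p.m_handle = &h;
    p.m_connectEvent = &ev;                    // listening, not yet connected
    assert(p.isConnecting() && !p.isConnected() && !p.isClosed());

    p.m_connectEvent = nullptr;                // client attached
    assert(p.isConnected() && !p.isConnecting());

    p.m_handle = nullptr;                      // closePipe()
    assert(p.isClosed() && !p.isConnected());
    return 0;
}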
src/libwinpty/winpty.cc
@@ -821,15 +821,7 @@ winpty_spawn(winpty_t *wp,
     // Receive reply.
     auto reply = readPacket(*wp);
     const auto result = static_cast<StartProcessResult>(reply.getInt32());
-    if (result == StartProcessResult::PipesStillOpen) {
-        const auto pipeList = reply.getWString();
-        reply.assertEof();
-
-        throwWinptyException(
-            (L"All I/O pipes must be connected before calling "
-             L"winpty_spawn. These pipes are still connecting: " +
-             pipeList).c_str());
-    } else if (result == StartProcessResult::CreateProcessFailed) {
+    if (result == StartProcessResult::CreateProcessFailed) {
         const DWORD lastError = reply.getInt32();
         reply.assertEof();
@@ -839,7 +831,7 @@ winpty_spawn(winpty_t *wp,
         }
         throw LibWinptyException(WINPTY_ERROR_SPAWN_CREATE_PROCESS_FAILED,
                                  L"CreateProcess failed");
-    } else {
+    } else if (result == StartProcessResult::ProcessCreated) {
         const HANDLE process = handleFromInt64(reply.getInt64());
         const HANDLE thread = handleFromInt64(reply.getInt64());
         reply.assertEof();
@@ -864,6 +856,8 @@ winpty_spawn(winpty_t *wp,
                 throwWindowsError(L"DuplicateHandle of thread handle");
             }
         }
+    } else {
+        ASSERT(false && "Invalid StartProcessResult");
     }
     return TRUE;
 } API_CATCH(FALSE)
src/shared/AgentMsg.h
@@ -32,7 +32,6 @@ struct AgentMsg
 enum class StartProcessResult {
     CreateProcessFailed,
     ProcessCreated,
-    PipesStillOpen,
 };

 #endif // WINPTY_SHARED_AGENT_MSG_H