azure-cosmos-js: Trying to query documents results in a Parse Error: HPE_HEADER_OVERFLOW

I am using the @azure/cosmos NPM package to query documents from CosmosDB and it results in the following error:

{ Error: Parse Error
    at TLSSocket.socketOnData (_http_client.js:442:20)
    at TLSSocket.emit (events.js:182:13)
    at addChunk (_stream_readable.js:283:12)
    at readableAddChunk (_stream_readable.js:264:11)
    at TLSSocket.Readable.push (_stream_readable.js:219:10)
    at TLSWrap.onStreamRead (internal/stream_base_commons.js:94:17)
  bytesParsed: 0,
  code: 'HPE_HEADER_OVERFLOW',
  headers:
   { 'x-ms-throttle-retry-count': 1,
     'x-ms-throttle-retry-wait-time-ms': 4059 } }

@azure/cosmos@2.1.1, Node.js 10.15.0

I’m using the following code:

const cosmos = require('@azure/cosmos');
const config = require('./local.settings.json');

const { CosmosClient } = cosmos;

const { endpoint, key } = config;
console.log(endpoint, key);
const client = new CosmosClient({ endpoint, auth: { key } });

const dbId = 'DB';
const containerId = 'Items';

async function getChats() {
  const querySpec =
    'SELECT * FROM c WHERE c.type = "ChatDefinition" AND c.roomId = "062db3be-339e-11e6-bebb-00163e75537f" AND c.lastEntry >= "2019-01-01"';
  const { result } = await client
    .database(dbId)
    .container(containerId)
    .items.query(querySpec)
    .toArray();
  return result;
}

async function run() {
  console.log('Getting chats...');
  const chats = await getChats();
  console.log(chats.length);
}

run().catch(console.error);

About this issue

  • Original URL
  • State: closed
  • Created 5 years ago
  • Comments: 22 (7 by maintainers)

Most upvoted comments

I’ve chatted with the backend team and determined we can safely default the header to 1 KB. I’ve done this in #384 and exposed an option to make it larger. This should help significantly with header limit issues. It is still possible for collections with a very large number of partitions to have a large session token, but the continuation token is the main culprit.
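
For anyone still on an affected version, the option referred to above is passed as a feed option on the query. Below is a minimal sketch that assumes the continuationTokenLimitInKB feed option found in later SDK releases; the exact option name may differ in older 2.x versions, so check the FeedOptions typings you have installed. It reuses the client, dbId, and containerId from the repro code above.

// Sketch: ask the service to keep the continuation token small so the
// response headers stay within Node's HTTP parser limit.
// continuationTokenLimitInKB is an assumption based on later SDK versions.
async function getChatsWithSmallToken() {
  const querySpec =
    'SELECT * FROM c WHERE c.type = "ChatDefinition" AND c.lastEntry >= "2019-01-01"';
  const { result } = await client
    .database(dbId)
    .container(containerId)
    .items.query(querySpec, { continuationTokenLimitInKB: 1 })
    .toArray();
  return result;
}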

FYI: We got around this issue by adding the --max-http-header-size Node flag.
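
For reference, that flag raises the limit of Node's HTTP parser (8 KB by default in the affected Node releases) for the whole process. The size and entry file below are only illustrative, not taken from this issue:

node --max-http-header-size=16384 app.js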